diff --git a/bats_ai/core/admin/pulse_metadata.py b/bats_ai/core/admin/pulse_metadata.py index d1d07e25..b6ba8922 100644 --- a/bats_ai/core/admin/pulse_metadata.py +++ b/bats_ai/core/admin/pulse_metadata.py @@ -5,5 +5,5 @@ @admin.register(PulseMetadata) class PulseMetadataAdmin(admin.ModelAdmin): - list_display = ('recording', 'index', 'bounding_box') + list_display = ('recording', 'index', 'bounding_box', 'curve', 'char_freq', 'knee', 'heel') list_select_related = True diff --git a/bats_ai/core/management/commands/copy_recordings.py b/bats_ai/core/management/commands/copy_recordings.py new file mode 100644 index 00000000..68c7a3c6 --- /dev/null +++ b/bats_ai/core/management/commands/copy_recordings.py @@ -0,0 +1,229 @@ +""" +Management command to create new recordings by copying existing ones with new names. + +Useful for generating test data: copies metadata and audio file from existing +recordings, assigns a new name and optional tags (default: test, foo, bar). +Reuses the source recording's spectrogram images and compressed spectrogram +(no recompute); copies RecordingAnnotations to the new recording. 
+""" + +import logging +import random + +from django.contrib.auth.models import User +from django.contrib.contenttypes.models import ContentType +from django.core.files.base import ContentFile +from django.core.files.storage import default_storage +from django.core.management.base import BaseCommand, CommandError +from django.db import transaction + +from bats_ai.core.models import ( + CompressedSpectrogram, + Recording, + RecordingAnnotation, + RecordingTag, + Spectrogram, + SpectrogramImage, +) + +logger = logging.getLogger(__name__) + +DEFAULT_TAGS = ['test', 'foo', 'bar'] + + +def _link_spectrogram_and_annotations(source_recording, new_recording): + spectrograms = list(Spectrogram.objects.filter(recording=source_recording).order_by('-created')) + if not spectrograms: + return + source_spectrogram = spectrograms[0] + + ct_spectrogram = ContentType.objects.get_for_model(Spectrogram) + ct_compressed = ContentType.objects.get_for_model(CompressedSpectrogram) + + # New Spectrogram (same dimensions) + new_spectrogram = Spectrogram.objects.create( + recording=new_recording, + width=source_spectrogram.width, + height=source_spectrogram.height, + duration=source_spectrogram.duration, + frequency_min=source_spectrogram.frequency_min, + frequency_max=source_spectrogram.frequency_max, + ) + # Link same image files: create SpectrogramImage rows pointing to source paths + for src_img in source_spectrogram.images.filter(type='spectrogram').order_by('index'): + new_img = SpectrogramImage( + content_type=ct_spectrogram, + object_id=new_spectrogram.id, + type='spectrogram', + index=src_img.index, + image_file=ContentFile(b' ', name='placeholder'), + ) + new_img.save() + old_path = new_img.image_file.name + SpectrogramImage.objects.filter(pk=new_img.pk).update(image_file=src_img.image_file.name) + if old_path and default_storage.exists(old_path): + default_storage.delete(old_path) + + # CompressedSpectrogram if present (most recent only) + compressed_qs = 
CompressedSpectrogram.objects.filter(recording=source_recording).order_by( + '-created' + )[:1] + for src_comp in compressed_qs: + new_comp = CompressedSpectrogram.objects.create( + recording=new_recording, + spectrogram=new_spectrogram, + length=src_comp.length, + starts=src_comp.starts, + stops=src_comp.stops, + widths=src_comp.widths, + cache_invalidated=src_comp.cache_invalidated, + ) + for src_img in src_comp.images.filter(type='compressed').order_by('index'): + new_img = SpectrogramImage( + content_type=ct_compressed, + object_id=new_comp.id, + type='compressed', + index=src_img.index, + image_file=ContentFile(b' ', name='placeholder'), + ) + new_img.save() + old_path = new_img.image_file.name + SpectrogramImage.objects.filter(pk=new_img.pk).update( + image_file=src_img.image_file.name + ) + if old_path and default_storage.exists(old_path): + default_storage.delete(old_path) + + # Copy RecordingAnnotations + for src_ann in RecordingAnnotation.objects.filter(recording=source_recording): + new_ann = RecordingAnnotation.objects.create( + recording=new_recording, + owner=new_recording.owner, + comments=src_ann.comments, + model=src_ann.model, + confidence=src_ann.confidence, + additional_data=src_ann.additional_data, + submitted=src_ann.submitted, + ) + new_ann.species.set(src_ann.species.all()) + + +class Command(BaseCommand): + help = ( + 'Create new recordings by copying existing ones with new names. ' + 'Optionally apply tags (default: test, foo, bar).' 
+    )
+
+    def add_arguments(self, parser):
+        parser.add_argument(
+            '--count',
+            type=int,
+            default=1,
+            help='Number of new recordings to create (default: 1)',
+        )
+        parser.add_argument(
+            '--tags',
+            type=str,
+            default=','.join(DEFAULT_TAGS),
+            help='Comma-separated tags to apply (default: test,foo,bar)',
+        )
+        parser.add_argument(
+            '--owner',
+            type=str,
+            help='Username of the owner for the new recordings '
+            '(default: use source recording owner)',
+        )
+
+    def handle(self, *args, **options):
+        count = options['count']
+        tags_raw = options['tags'] or ','.join(DEFAULT_TAGS)
+        tag_texts = [t.strip() for t in tags_raw.split(',') if t.strip()]
+        if not tag_texts:
+            tag_texts = DEFAULT_TAGS
+        owner_username = options.get('owner')
+
+        if count < 1:
+            raise CommandError('--count must be at least 1.')
+
+        recordings = list(Recording.objects.all().order_by('id'))
+        if not recordings:
+            raise CommandError('No existing recordings found. Create or import some first.')
+
+        owner = None
+        if owner_username:
+            try:
+                owner = User.objects.get(username=owner_username)
+            except User.DoesNotExist:
+                raise CommandError(f'User not found: {owner_username}') from None
+
+        created = []
+        for i in range(count):
+            source = recordings[i % len(recordings)]
+            if not owner_username:
+                # No explicit --owner: each copy uses its own source's owner.
+                # (Test owner_username, not owner, so the default is re-derived
+                # every iteration instead of latching the first source's owner.)
+                owner = source.owner
+
+            new_name = f'Copy of {source.name} ({i + 1})'
+            self.stdout.write(
+                f'Creating copy {i + 1}/{count}: {new_name} from recording id={source.pk}'
+            )
+
+            try:
+                with transaction.atomic():
+                    # Copy file content (works for local and remote storage)
+                    source.audio_file.open('rb')
+                    try:
+                        file_content = source.audio_file.read()
+                    finally:
+                        source.audio_file.close()
+
+                    # Preserve extension if present
+                    ext = ''
+                    if source.audio_file.name and '.' in source.audio_file.name:
+                        ext = '.'
+ source.audio_file.name.rsplit('.', 1)[-1] + save_name = new_name + ext if ext else new_name + + new_recording = Recording( + name=new_name, + owner=owner, + audio_file=ContentFile(file_content, name=save_name), + recorded_date=source.recorded_date, + recorded_time=source.recorded_time, + equipment=source.equipment, + comments=source.comments, + recording_location=source.recording_location, + grts_cell_id=source.grts_cell_id, + grts_cell=source.grts_cell, + public=source.public, + software=source.software, + detector=source.detector, + species_list=source.species_list, + site_name=source.site_name, + unusual_occurrences=source.unusual_occurrences, + ) + new_recording.save() + + # Apply a random subset of tags to this recording + k = random.randint(1, len(tag_texts)) + chosen = random.sample(tag_texts, k=k) + for text in chosen: + tag, _ = RecordingTag.objects.get_or_create(user=owner, text=text) + new_recording.tags.add(tag) + + # Reuse source spectrogram images and copy annotations (no recompute) + _link_spectrogram_and_annotations(source, new_recording) + + created.append(new_recording) + self.stdout.write( + self.style.SUCCESS(f' Created recording id={new_recording.pk}') + ) + self.stdout.write(' Linked spectrogram images and copied annotations.') + + except Exception as e: + self.stdout.write(self.style.ERROR(f' Failed: {e}')) + logger.exception('Error copying recording', exc_info=e) + + self.stdout.write('') + tag_str = ', '.join(tag_texts) + self.stdout.write( + self.style.SUCCESS(f'Done: created {len(created)} recording(s) with tags: {tag_str}') + ) diff --git a/bats_ai/core/migrations/0029_pulsemetadata_char_freq_pulsemetadata_curve_and_more.py b/bats_ai/core/migrations/0029_pulsemetadata_char_freq_pulsemetadata_curve_and_more.py new file mode 100644 index 00000000..33f0ee62 --- /dev/null +++ b/bats_ai/core/migrations/0029_pulsemetadata_char_freq_pulsemetadata_curve_and_more.py @@ -0,0 +1,35 @@ +# Generated by Django 4.2.23 on 2026-02-03 19:43 + +import 
django.contrib.gis.db.models.fields +from django.db import migrations + + +class Migration(migrations.Migration): + dependencies = [ + ('core', '0028_alter_spectrogramimage_type_pulsemetadata'), + ] + + operations = [ + migrations.AddField( + model_name='pulsemetadata', + name='char_freq', + field=django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326), + ), + migrations.AddField( + model_name='pulsemetadata', + name='curve', + field=django.contrib.gis.db.models.fields.LineStringField( + blank=True, null=True, srid=4326 + ), + ), + migrations.AddField( + model_name='pulsemetadata', + name='heel', + field=django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326), + ), + migrations.AddField( + model_name='pulsemetadata', + name='knee', + field=django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326), + ), + ] diff --git a/bats_ai/core/models/pulse_metadata.py b/bats_ai/core/models/pulse_metadata.py index b1dcc8bb..b3622fc6 100644 --- a/bats_ai/core/models/pulse_metadata.py +++ b/bats_ai/core/models/pulse_metadata.py @@ -8,4 +8,7 @@ class PulseMetadata(models.Model): index = models.IntegerField(null=False, blank=False) bounding_box = models.PolygonField(null=False, blank=False) contours = models.JSONField(null=True, blank=True) - # TODO: Add in metadata from batbot + curve = models.LineStringField(null=True, blank=True) + char_freq = models.PointField(null=True, blank=True) + knee = models.PointField(null=True, blank=True) + heel = models.PointField(null=True, blank=True) diff --git a/bats_ai/core/models/spectrogram_image.py b/bats_ai/core/models/spectrogram_image.py index 08b14ce1..afbce443 100644 --- a/bats_ai/core/models/spectrogram_image.py +++ b/bats_ai/core/models/spectrogram_image.py @@ -40,5 +40,10 @@ class Meta: @receiver(models.signals.pre_delete, sender=SpectrogramImage) def delete_content(sender, instance, **kwargs): - if instance.image_file: + if not instance.image_file: + return + # 
Only delete the file if no other SpectrogramImage references the same path + # (allows shared image references e.g. from copy_recordings management command) + same_path_count = sender.objects.filter(image_file=instance.image_file.name).count() + if same_path_count <= 1: instance.image_file.delete(save=False) diff --git a/bats_ai/core/tasks/nabat/tasks.py b/bats_ai/core/tasks/nabat/tasks.py index 932e8981..596a70fb 100644 --- a/bats_ai/core/tasks/nabat/tasks.py +++ b/bats_ai/core/tasks/nabat/tasks.py @@ -2,9 +2,10 @@ from pathlib import Path import tempfile +from django.contrib.gis.geos import LineString, Point, Polygon import requests -from bats_ai.core.models import Configuration, ProcessingTask, Species +from bats_ai.core.models import Configuration, ProcessingTask, PulseMetadata, Species from bats_ai.core.models.nabat import NABatRecording, NABatRecordingAnnotation from bats_ai.core.utils.batbot_metadata import generate_spectrogram_assets from bats_ai.utils.spectrogram_utils import ( @@ -57,6 +58,48 @@ def generate_spectrograms( compressed_obj = generate_nabat_compressed_spectrogram( nabat_recording, spectrogram, compressed ) + segment_index_map = {} + for segment in compressed['contours']['segments']: + pulse_metadata_obj, _ = PulseMetadata.objects.get_or_create( + recording=compressed_obj.recording, + index=segment['segment_index'], + defaults={ + 'contours': segment['contours'], + 'bounding_box': Polygon( + ( + (segment['start_ms'], segment['freq_max']), + (segment['stop_ms'], segment['freq_max']), + (segment['stop_ms'], segment['freq_min']), + (segment['start_ms'], segment['freq_min']), + (segment['start_ms'], segment['freq_max']), + ) + ), + }, + ) + segment_index_map[segment['segment_index']] = pulse_metadata_obj + for segment in compressed['segments']: + if segment['segment_index'] not in segment_index_map: + PulseMetadata.objects.update_or_create( + recording=compressed_obj.recording, + index=segment['segment_index'], + defaults={ + 'curve': 
LineString([Point(x[1], x[0]) for x in segment['curve_hz_ms']]), + 'char_freq': Point(segment['char_freq_ms'], segment['char_freq_hz']), + 'knee': Point(segment['knee_ms'], segment['knee_hz']), + 'heel': Point(segment['heel_ms'], segment['heel_hz']), + }, + ) + else: + pulse_metadata_obj = segment_index_map[segment['segment_index']] + pulse_metadata_obj.curve = LineString( + [Point(x[1], x[0]) for x in segment['curve_hz_ms']] + ) + pulse_metadata_obj.char_freq = Point( + segment['char_freq_ms'], segment['char_freq_hz'] + ) + pulse_metadata_obj.knee = Point(segment['knee_ms'], segment['knee_hz']) + pulse_metadata_obj.heel = Point(segment['heel_ms'], segment['heel_hz']) + pulse_metadata_obj.save() try: config = Configuration.objects.first() diff --git a/bats_ai/core/tasks/tasks.py b/bats_ai/core/tasks/tasks.py index 9afb3d61..f58ba6e5 100644 --- a/bats_ai/core/tasks/tasks.py +++ b/bats_ai/core/tasks/tasks.py @@ -4,7 +4,7 @@ import tempfile from django.contrib.contenttypes.models import ContentType -from django.contrib.gis.geos import Polygon +from django.contrib.gis.geos import LineString, Point, Polygon from django.core.files import File from bats_ai.celery import app @@ -104,8 +104,9 @@ def recording_compute_spectrogram(recording_id: int): ) # Create SpectrogramContour objects for each segment - for segment in results['segments']['segments']: - PulseMetadata.objects.update_or_create( + segment_index_map = {} + for segment in compressed['contours']['segments']: + pulse_metadata_obj, _ = PulseMetadata.objects.update_or_create( recording=compressed_obj.recording, index=segment['segment_index'], defaults={ @@ -121,6 +122,30 @@ def recording_compute_spectrogram(recording_id: int): ), }, ) + segment_index_map[segment['segment_index']] = pulse_metadata_obj + for segment in compressed['segments']: + if segment['segment_index'] not in segment_index_map: + PulseMetadata.objects.update_or_create( + recording=compressed_obj.recording, + index=segment['segment_index'], + 
defaults={ + 'curve': LineString([Point(x[1], x[0]) for x in segment['curve_hz_ms']]), + 'char_freq': Point(segment['char_freq_ms'], segment['char_freq_hz']), + 'knee': Point(segment['knee_ms'], segment['knee_hz']), + 'heel': Point(segment['heel_ms'], segment['heel_hz']), + }, + ) + else: + pulse_metadata_obj = segment_index_map[segment['segment_index']] + pulse_metadata_obj.curve = LineString( + [Point(x[1], x[0]) for x in segment['curve_hz_ms']] + ) + pulse_metadata_obj.char_freq = Point( + segment['char_freq_ms'], segment['char_freq_hz'] + ) + pulse_metadata_obj.knee = Point(segment['knee_ms'], segment['knee_hz']) + pulse_metadata_obj.heel = Point(segment['heel_ms'], segment['heel_hz']) + pulse_metadata_obj.save() config = Configuration.objects.first() # TODO: Disabled until prediction is in batbot diff --git a/bats_ai/core/utils/batbot_metadata.py b/bats_ai/core/utils/batbot_metadata.py index 730a5083..d7032ba1 100644 --- a/bats_ai/core/utils/batbot_metadata.py +++ b/bats_ai/core/utils/batbot_metadata.py @@ -1,5 +1,6 @@ from contextlib import contextmanager import json +import logging import os from pathlib import Path from typing import Any, TypedDict @@ -9,6 +10,8 @@ from .contour_utils import process_spectrogram_assets_for_contours +logger = logging.getLogger(__name__) + class SpectrogramMetadata(BaseModel): """Metadata about the spectrogram.""" @@ -255,6 +258,17 @@ class SpectrogramContourSegment(TypedDict): stop_ms: float +class BatBotMetadataCurve(TypedDict): + segment_index: int + curve_hz_ms: list[float] + char_freq_ms: float + char_freq_hz: float + knee_ms: float + knee_hz: float + heel_ms: float + heel_hz: float + + class SpectrogramContours(TypedDict): segments: list[SpectrogramContourSegment] total_segments: int @@ -266,7 +280,7 @@ class SpectrogramAssets(TypedDict): freq_max: int normal: SpectrogramAssetResult compressed: SpectrogramCompressedAssetResult - segments: SpectrogramContours | None + contours: SpectrogramContours | None @contextmanager 
@@ -279,6 +293,25 @@ def working_directory(path): os.chdir(previous) +def convert_to_segment_data( + metadata: BatbotMetadata, +) -> list[BatBotMetadataCurve]: + segment_data: list[BatBotMetadataCurve] = [] + for index, segment in enumerate(metadata.segments): + segment_data_item: BatBotMetadataCurve = { + 'segment_index': index, + 'curve_hz_ms': segment.curve_hz_ms, + 'char_freq_ms': segment.fc_ms, + 'char_freq_hz': segment.fc_hz, + 'knee_ms': segment.hi_fc_knee_ms, + 'knee_hz': segment.hi_fc_knee_hz, + 'heel_ms': segment.lo_fc_heel_ms, + 'heel_hz': segment.lo_fc_heel_hz, + } + segment_data.append(segment_data_item) + return segment_data + + def generate_spectrogram_assets(recording_path: str, output_folder: str): batbot.pipeline(recording_path, output_folder=output_folder) # There should be a .metadata.json file in the output_base directory by replacing extentions @@ -294,6 +327,7 @@ def generate_spectrogram_assets(recording_path: str, output_folder: str): metadata.frequencies.max_hz compressed_metadata = convert_to_compressed_spectrogram_data(metadata) + segment_curve_data = convert_to_segment_data(metadata) result: SpectrogramAssets = { 'duration': metadata.duration_ms, 'freq_min': metadata.frequencies.min_hz, @@ -311,10 +345,11 @@ def generate_spectrogram_assets(recording_path: str, output_folder: str): 'widths': compressed_metadata.widths, 'starts': compressed_metadata.starts, 'stops': compressed_metadata.stops, + 'segments': segment_curve_data, }, } - segments_data = process_spectrogram_assets_for_contours(result) - result['segments'] = segments_data + contour_segments_data = process_spectrogram_assets_for_contours(result) + result['compressed']['contours'] = contour_segments_data return result diff --git a/bats_ai/core/views/recording.py b/bats_ai/core/views/recording.py index 519b0355..ee24cd4d 100644 --- a/bats_ai/core/views/recording.py +++ b/bats_ai/core/views/recording.py @@ -1,15 +1,15 @@ from datetime import datetime import json import logging -from 
typing import Any +from typing import Any, Literal from django.contrib.auth.models import User from django.contrib.gis.geos import Point from django.contrib.postgres.aggregates import ArrayAgg from django.core.files.storage import default_storage -from django.db.models import Q +from django.db.models import Count, Exists, OuterRef, Prefetch, Q, QuerySet from django.http import HttpRequest -from ninja import File, Form, Schema +from ninja import File, Form, Query, Schema from ninja.files import UploadedFile from ninja.pagination import RouterPaginated @@ -22,6 +22,7 @@ RecordingTag, SequenceAnnotations, Species, + Spectrogram, ) from bats_ai.core.tasks.tasks import recording_compute_spectrogram from bats_ai.core.views.recording_tag import RecordingTagSchema @@ -51,6 +52,47 @@ class RecordingSchema(Schema): tags: list[RecordingTagSchema] = [] +class RecordingListQuerySchema(Schema): + """Query params for paginated recording list (v-data-table-server compatible).""" + + public: bool | None = None + exclude_submitted: bool | None = None + annotation_completed: bool | None = None + search: str | None = None + tags: str | None = None # Comma-separated tag texts; recording must have all listed tags + sort_by: ( + Literal['id', 'name', 'created', 'modified', 'recorded_date', 'owner_username'] | None + ) = 'created' + sort_direction: Literal['asc', 'desc'] | None = 'desc' + page: int = 1 + limit: int = 20 + + +class RecordingPaginatedResponse(Schema): + """Response for paginated recording list (v-data-table-server compatible).""" + + items: list[dict[str, Any]] + count: int + + +class UnsubmittedNeighborsQuerySchema(Schema): + """Query params for unsubmitted neighbors (next/previous recording IDs).""" + + current: int + sort_by: ( + Literal['id', 'name', 'created', 'modified', 'recorded_date', 'owner_username'] | None + ) = 'created' + sort_direction: Literal['asc', 'desc'] | None = 'desc' + tags: str | None = None # Comma-separated tag texts; recording must have all listed 
tags + + +class UnsubmittedNeighborsResponse(Schema): + """Response for unsubmitted neighbors: next and previous recording IDs in the vetting order.""" + + next_id: int | None + previous_id: int | None + + class RecordingUploadSchema(Schema): name: str recorded_date: str @@ -130,7 +172,7 @@ class UpdateAnnotationsSchema(Schema): id: int | None -class PulseMetadataSchema(Schema): +class PulseContourSchema(Schema): id: int | None index: int bounding_box: Any @@ -146,6 +188,36 @@ def from_orm(cls, obj: PulseMetadata): ) +class PulseMetadataSchema(Schema): + id: int | None + index: int + curve: list[list[float]] | None = None # list of [time, frequency] + char_freq: list[float] | None = None # point [time, frequency] + knee: list[float] | None = None # point [time, frequency] + heel: list[float] | None = None # point [time, frequency] + + @classmethod + def from_orm(cls, obj: PulseMetadata): + def point_to_list(pt): + if pt is None: + return None + return [pt.x, pt.y] + + def linestring_to_list(ls): + if ls is None: + return None + return [[c[0], c[1]] for c in ls.coords] + + return cls( + id=obj.id, + index=obj.index, + curve=linestring_to_list(obj.curve), + char_freq=point_to_list(obj.char_freq), + knee=point_to_list(obj.knee), + heel=point_to_list(obj.heel), + ) + + @router.post('/') def create_recording( request: HttpRequest, @@ -240,6 +312,66 @@ def update_recording(request: HttpRequest, id: int, recording_data: RecordingUpl return {'message': 'Recording updated successfully', 'id': recording.pk} +def _build_recordings_response( + request: HttpRequest, + page_recordings: list[Recording], + annotation_counts: dict[int, int], + user_has_annotations_ids: set[int], +) -> list[dict]: + items = [] + for rec in page_recordings: + if rec.recording_location: + location = json.loads(rec.recording_location.json) + else: + location = rec.recording_location + items.append( + { + 'id': rec.id, + 'name': rec.name, + 'audio_file': str(rec.audio_file), + 'owner_id': rec.owner_id, 
+ 'recorded_date': rec.recorded_date, + 'recorded_time': rec.recorded_time, + 'equipment': rec.equipment, + 'comments': rec.comments, + 'recording_location': location, + 'grts_cell_id': rec.grts_cell_id, + 'grts_cell': rec.grts_cell, + 'public': rec.public, + 'created': rec.created, + 'modified': rec.modified, + 'software': rec.software, + 'detector': rec.detector, + 'species_list': rec.species_list, + 'site_name': rec.site_name, + 'unusual_occurrences': rec.unusual_occurrences, + 'tags_text': getattr(rec, 'tags_text', None), + 'owner_username': rec.owner.username, + 'audio_file_presigned_url': default_storage.url(rec.audio_file.name), + 'hasSpectrogram': rec.has_spectrogram_attr, + 'userAnnotations': annotation_counts.get(rec.id, 0), + 'userMadeAnnotations': rec.id in user_has_annotations_ids, + 'fileAnnotations': [ + RecordingAnnotationSchema.from_orm(fa).dict() + for fa in rec.recordingannotation_set.all() + ], + } + ) + return items + + +def _base_recordings_queryset(request: HttpRequest, public: bool | None) -> QuerySet[Recording]: + if public is not None and public: + return ( + Recording.objects.filter(public=True) + .exclude(Q(owner=request.user) | Q(spectrogram__isnull=True)) + .annotate(tags_text=ArrayAgg('tags__text', filter=Q(tags__text__isnull=False))) + ) + return Recording.objects.filter(owner=request.user).annotate( + tags_text=ArrayAgg('tags__text', filter=Q(tags__text__isnull=False)) + ) + + @router.delete('/{id}') def delete_recording( request, @@ -264,64 +396,161 @@ def delete_recording( return {'error': 'Annotation not found'} -@router.get('/') +@router.get('/', response=RecordingPaginatedResponse) def get_recordings( - request: HttpRequest, public: bool | None = None, exclude_submitted: bool | None = None + request: HttpRequest, + q: Query[RecordingListQuerySchema], ): - # Filter recordings based on the owner's id or public=True - if public is not None and public: - recordings = ( - Recording.objects.filter(public=True) - 
.exclude(Q(owner=request.user) | Q(spectrogram__isnull=True)) - .annotate(tags_text=ArrayAgg('tags__text', filter=Q(tags__text__isnull=False))) - .values() + queryset = _base_recordings_queryset(request, q.public) + + if q.exclude_submitted: + submitted_by_user = RecordingAnnotation.objects.filter( + owner=request.user, submitted=True + ).values_list('recording_id', flat=True) + queryset = queryset.exclude(pk__in=submitted_by_user) + + if q.annotation_completed is not None: + has_submitted = RecordingAnnotation.objects.filter(submitted=True).values_list( + 'recording_id', flat=True ) - else: - recordings = ( - Recording.objects.filter(owner=request.user) - .annotate(tags_text=ArrayAgg('tags__text', filter=Q(tags__text__isnull=False))) - .values() + if q.annotation_completed: + queryset = queryset.filter(pk__in=has_submitted) + else: + queryset = queryset.exclude(pk__in=has_submitted) + + if q.search and q.search.strip(): + search = q.search.strip() + search_q = ( + Q(name__icontains=search) + | Q(comments__icontains=search) + | Q(equipment__icontains=search) + | Q(site_name__icontains=search) + | Q(tags__text__icontains=search) ) + queryset = queryset.filter(search_q).distinct() + + if q.tags and q.tags.strip(): + tag_list = [t.strip() for t in q.tags.split(',') if t.strip()] + for tag in tag_list: + queryset = queryset.filter(tags__text=tag) + if tag_list: + queryset = queryset.distinct() + + sort_field = q.sort_by or 'created' + order_prefix = '' if q.sort_direction == 'asc' else '-' + if sort_field == 'owner_username': + queryset = queryset.order_by(f'{order_prefix}owner__username') + else: + queryset = queryset.order_by(f'{order_prefix}{sort_field}') - # TODO with larger dataset it may be better to do this in a queryset instead of python - for recording in recordings: - user = User.objects.get(id=recording['owner_id']) - fileAnnotations = RecordingAnnotation.objects.filter(recording=recording['id']) - recording['fileAnnotations'] = [ - 
RecordingAnnotationSchema.from_orm(fileAnnotation).dict() - for fileAnnotation in fileAnnotations + # Annotate has_spectrogram in SQL to avoid one query per recording + queryset = queryset.annotate( + has_spectrogram_attr=Exists(Spectrogram.objects.filter(recording=OuterRef('pk'))) + ) + count = queryset.count() + offset = (q.page - 1) * q.limit + + # One query for page of recordings + owner; prefetch file annotations + species (no N+1) + file_annotations_prefetch = Prefetch( + 'recordingannotation_set', + queryset=RecordingAnnotation.objects.prefetch_related('species').order_by('confidence'), + ) + page_recordings = list( + queryset.select_related('owner').prefetch_related(file_annotations_prefetch)[ + offset : offset + q.limit ] - recording['owner_username'] = user.username - recording['audio_file_presigned_url'] = default_storage.url(recording['audio_file']) - recording['hasSpectrogram'] = Recording.objects.get(id=recording['id']).has_spectrogram - if recording['recording_location']: - recording['recording_location'] = json.loads(recording['recording_location'].json) - unique_users_with_annotations = ( - Annotations.objects.filter(recording_id=recording['id']) - .values('owner') - .distinct() - .count() - ) - recording['userAnnotations'] = unique_users_with_annotations - user_has_annotations = ( - Annotations.objects.filter(recording_id=recording['id'], owner=request.user).exists() - or RecordingAnnotation.objects.filter( - recording_id=recording['id'], owner=request.user - ).exists() + ) + + if not page_recordings: + return RecordingPaginatedResponse(items=[], count=count) + + rec_ids = [r.id for r in page_recordings] + # Bulk: unique annotation user count per recording (Annotations table only) + annotation_counts = dict( + Annotations.objects.filter(recording_id__in=rec_ids) + .values('recording_id') + .annotate(c=Count('owner', distinct=True)) + .values_list('recording_id', 'c') + ) + # Bulk: recording IDs where request.user has any annotation (Annotations or 
RecordingAnnotation) + user_has_annotations_ids = set( + Annotations.objects.filter(recording_id__in=rec_ids, owner=request.user).values_list( + 'recording_id', flat=True ) - recording['userMadeAnnotations'] = user_has_annotations - - if exclude_submitted: - recordings = [ - recording - for recording in recordings - if not any( - annotation['submitted'] and annotation['owner'] == request.user.username - for annotation in recording['fileAnnotations'] - ) - ] + ) | set( + RecordingAnnotation.objects.filter( + recording_id__in=rec_ids, owner=request.user + ).values_list('recording_id', flat=True) + ) - return list(recordings) + items = _build_recordings_response( + request, page_recordings, annotation_counts, user_has_annotations_ids + ) + return RecordingPaginatedResponse(items=items, count=count) + + +def _unsubmitted_recording_ids_ordered( + request: HttpRequest, + sort_by: str = 'created', + sort_direction: str = 'desc', + tags: str | None = None, +) -> list[int]: + submitted_by_user = RecordingAnnotation.objects.filter( + owner=request.user, submitted=True + ).values_list('recording_id', flat=True) + + def apply_filters_and_sort(qs: QuerySet[Recording]) -> QuerySet[Recording]: + qs = qs.exclude(pk__in=submitted_by_user) + if tags and tags.strip(): + tag_list = [t.strip() for t in tags.split(',') if t.strip()] + for tag in tag_list: + qs = qs.filter(tags__text=tag) + if tag_list: + qs = qs.distinct() + order_prefix = '' if sort_direction == 'asc' else '-' + if sort_by == 'owner_username': + qs = qs.order_by(f'{order_prefix}owner__username') + else: + qs = qs.order_by(f'{order_prefix}{sort_by}') + return qs + + my_qs = apply_filters_and_sort(_base_recordings_queryset(request, False)) + shared_qs = apply_filters_and_sort(_base_recordings_queryset(request, True)) + + my_ids = list(my_qs.values_list('id', flat=True)) + shared_ids = list(shared_qs.values_list('id', flat=True)) + return my_ids + shared_ids + + +@router.get('/unsubmitted-neighbors/', 
response=UnsubmittedNeighborsResponse) +def get_unsubmitted_neighbors( + request: HttpRequest, + q: Query[UnsubmittedNeighborsQuerySchema], +): + current_id = q.current + # Verify user can access the current recording (owner or public) + try: + rec = Recording.objects.get(pk=current_id) + except Recording.DoesNotExist: + return UnsubmittedNeighborsResponse(next_id=None, previous_id=None) + if rec.owner != request.user and not rec.public: + return UnsubmittedNeighborsResponse(next_id=None, previous_id=None) + + sort_by = q.sort_by or 'created' + sort_direction = q.sort_direction or 'desc' + ids = _unsubmitted_recording_ids_ordered( + request, sort_by=sort_by, sort_direction=sort_direction, tags=q.tags + ) + + try: + idx = ids.index(current_id) + except ValueError: + # Current not in unsubmitted list (e.g. already submitted) + return UnsubmittedNeighborsResponse(next_id=None, previous_id=None) + + next_id = ids[idx + 1] if idx + 1 < len(ids) else None + previous_id = ids[idx - 1] if idx - 1 >= 0 else None + return UnsubmittedNeighborsResponse(next_id=next_id, previous_id=previous_id) @router.get('/{id}/') @@ -560,6 +789,25 @@ def get_annotations(request: HttpRequest, id: int): return {'error': 'Recording not found'} +@router.get('/{id}/pulse_contours') +def get_pulse_contours(request: HttpRequest, id: int): + try: + recording = Recording.objects.get(pk=id) + if recording.owner == request.user or recording.public: + computed_pulse_annotation_qs = PulseMetadata.objects.filter( + recording=recording + ).order_by('index') + return [ + PulseContourSchema.from_orm(pulse) for pulse in computed_pulse_annotation_qs.all() + ] + else: + return { + 'error': 'Permission denied. You do not own this recording, and it is not public.' 
+ } + except Recording.DoesNotExist: + return {'error': 'Recording not found'} + + @router.get('/{id}/pulse_data') def get_pulse_data(request: HttpRequest, id: int): try: diff --git a/bats_ai/core/views/vetting_details.py b/bats_ai/core/views/vetting_details.py index 3896e17a..d6b44e03 100644 --- a/bats_ai/core/views/vetting_details.py +++ b/bats_ai/core/views/vetting_details.py @@ -8,7 +8,7 @@ class VettingDetailsSchema(Schema): - id: int + id: int | None # Allow null for cases where no details exist user_id: int reference_materials: str @@ -25,8 +25,8 @@ class UpdateVettingDetailsSchema(Schema): def get_vetting_details_for_user(request: HttpRequest, user_id: int): details = VettingDetails.objects.filter(user_id=user_id).first() - if not details: - raise Http404() + if not details: # Ensure we return a consistent schema even if no details exist + return {'id': None, 'user_id': user_id, 'reference_materials': ''} if details.user != request.user and not request.user.is_staff: # Don't leak user IDs, prefer to return a 404 over a 403 diff --git a/client/src/App.vue b/client/src/App.vue index 0b35626c..079a4f08 100644 --- a/client/src/App.vue +++ b/client/src/App.vue @@ -23,7 +23,7 @@ export default defineComponent({ loadReviewerMaterials, } = useState(); const getShared = async () => { - sharedList.value = (await getRecordings(true)).data; + sharedList.value = (await getRecordings(true)).data.items; }; if (oauthClient === undefined) { throw new Error('Must provide "oauthClient" into component.'); diff --git a/client/src/api/api.ts b/client/src/api/api.ts index e8612c94..cc166879 100644 --- a/client/src/api/api.ts +++ b/client/src/api/api.ts @@ -278,12 +278,84 @@ export interface RecordingTag { user_id: number; } -async function getRecordings(getPublic = false) { - return axiosInstance.get(`/recording/?public=${getPublic}`); +/** Params for paginated recording list (v-data-table-server compatible). 
*/ +export interface RecordingListParams { + public?: boolean; + exclude_submitted?: boolean; + annotation_completed?: boolean; + search?: string; + /** Filter by tags: recording must have all listed tags. Comma-separated or array. */ + tags?: string | string[]; + sort_by?: 'id' | 'name' | 'created' | 'modified' | 'recorded_date' | 'owner_username'; + sort_direction?: 'asc' | 'desc'; + page?: number; + limit?: number; +} + +/** Paginated recording list response (v-data-table-server compatible). */ +export interface RecordingPaginatedResponse { + items: Recording[]; + count: number; +} + +async function getRecordings(getPublic = false, params?: RecordingListParams) { + const query = new URLSearchParams(); + if (getPublic) { + query.set('public', 'true'); + } + if (params) { + if (params.public !== undefined) query.set('public', String(params.public)); + if (params.exclude_submitted !== undefined) + query.set('exclude_submitted', String(params.exclude_submitted)); + if (params.annotation_completed !== undefined) + query.set('annotation_completed', String(params.annotation_completed)); + if (params.search) query.set('search', params.search); + if (params.tags !== undefined) { + const tagStr = Array.isArray(params.tags) ? 
params.tags.join(',') : params.tags; + if (tagStr) query.set('tags', tagStr); + } + if (params.sort_by) query.set('sort_by', params.sort_by); + if (params.sort_direction) query.set('sort_direction', params.sort_direction); + if (params.page !== undefined) query.set('page', String(params.page)); + if (params.limit !== undefined) query.set('limit', String(params.limit)); + } + if (!params?.page) query.set('page', '1'); + if (!params?.limit) query.set('limit', '20'); + return axiosInstance.get(`/recording/?${query.toString()}`); } async function getRecording(id: string) { return axiosInstance.get(`/recording/${id}/`); } + +export interface UnsubmittedNeighborsParams { + sort_by?: 'id' | 'name' | 'created' | 'modified' | 'recorded_date' | 'owner_username'; + sort_direction?: 'asc' | 'desc'; + /** Comma-separated or array of tag texts; recording must have all listed tags. */ + tags?: string | string[]; +} + +export interface UnsubmittedNeighborsResponse { + next_id: number | null; + previous_id: number | null; +} + +async function getUnsubmittedNeighbors( + currentId: number, + params?: UnsubmittedNeighborsParams +) { + const query = new URLSearchParams({ current: String(currentId) }); + if (params?.sort_by) query.set('sort_by', params.sort_by); + if (params?.sort_direction) query.set('sort_direction', params.sort_direction); + if (params?.tags !== undefined) { + const tagStr = Array.isArray(params.tags) ? 
params.tags.join(',') : params.tags; + if (tagStr) query.set('tags', tagStr); + } + const response = await axiosInstance.get( + `/recording/unsubmitted-neighbors/?${query.toString()}` + ); + return response; +} + async function getRecordingTags() { return axiosInstance.get(`/recording-tag/`); } @@ -539,13 +611,13 @@ async function getExportStatus(exportId: number) { } export interface VettingDetails { - id: number; + id: number | null; user_id: number; reference_materials: string; } export interface UpdateVettingDetails { - reference_Materials: string; + reference_materials: string; } async function getVettingDetailsForUser(userId: number) { @@ -574,14 +646,28 @@ export interface Contour { index: number; } -export interface ComputedPulseAnnotation { +export interface ComputedPulseContour { id: number; index: number; contours: Contour[]; } -async function getComputedPulseAnnotations(recordingId: number) { - const result = await axiosInstance.get(`/recording/${recordingId}/pulse_data`); +async function getComputedPulseContour(recordingId: number) { + const result = await axiosInstance.get(`/recording/${recordingId}/pulse_contours`); + return result.data; +} + +export interface PulseMetadata { + id: number; + index: number; + curve: number[][] | null; // list of [time, frequency] + char_freq: number[] | null; // point [time, frequency] + knee: number[] | null; // point [time, frequency] + heel: number[] | null; // point [time, frequency] +} + +async function getPulseMetadata(recordingId: number) { + const result = await axiosInstance.get(`/recording/${recordingId}/pulse_data`); return result.data; } @@ -622,7 +708,9 @@ export { getFileAnnotationDetails, getExportStatus, getRecordingTags, - getComputedPulseAnnotations, + getUnsubmittedNeighbors, + getComputedPulseContour, + getPulseMetadata, getCurrentUser, getVettingDetailsForUser, createOrUpdateVettingDetailsForUser, diff --git a/client/src/components/PulseMetadataButton.vue 
b/client/src/components/PulseMetadataButton.vue new file mode 100644 index 00000000..4dcf61f3 --- /dev/null +++ b/client/src/components/PulseMetadataButton.vue @@ -0,0 +1,228 @@ + + + + + diff --git a/client/src/components/RecordingAnnotations.vue b/client/src/components/RecordingAnnotations.vue index a167ec8e..33b4b256 100644 --- a/client/src/components/RecordingAnnotations.vue +++ b/client/src/components/RecordingAnnotations.vue @@ -38,7 +38,7 @@ export default defineComponent({ const annotations: Ref = ref([]); const detailsDialog = ref(false); const detailRecordingId = ref(-1); - const { configuration, isNaBat, currentUser, markAnnotationSubmitted } = useState(); + const { configuration, isNaBat, currentUser } = useState(); const setSelectedId = (annotation: FileAnnotation) => { selectedAnnotation.value = annotation; @@ -108,9 +108,6 @@ export default defineComponent({ function handleSubmitAnnotation(annotation: FileAnnotation, submitSuccess: boolean) { if (submitSuccess) { annotation.submitted = true; - // Also update submitted status on the recording object - // This forces recomputation of allRecordings - markAnnotationSubmitted(props.recordingId, annotation.id); } } diff --git a/client/src/components/RecordingList.vue b/client/src/components/RecordingList.vue index 39b4f458..51458de0 100644 --- a/client/src/components/RecordingList.vue +++ b/client/src/components/RecordingList.vue @@ -1,6 +1,6 @@ @@ -437,24 +400,25 @@ export default defineComponent({ - - - Highlight Compressed Areas -
+
+ +
+
-
+
-
+
@@ -699,13 +742,14 @@ export default defineComponent({ Prev @@ -719,6 +763,7 @@ export default defineComponent({ Next diff --git a/pyproject.toml b/pyproject.toml index a4af69be..318804c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -118,8 +118,6 @@ explicit = true [tool.uv.sources] gdal = { index = "large_image_wheels" } -batbot = { git = "https://github.com/Kitware/batbot" } - [tool.black] line-length = 100 diff --git a/scripts/batbot/batch_convert_recordings.py b/scripts/batbot/batch_convert_recordings.py new file mode 100644 index 00000000..e1658671 --- /dev/null +++ b/scripts/batbot/batch_convert_recordings.py @@ -0,0 +1,821 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "batbot", +# "click", +# "guano", +# "opencv-python", +# "pydantic", +# "scipy", +# "scikit-image", +# ] +# +# [tool.uv.sources] +# batbot = { git = "https://github.com/Kitware/batbot" } +# /// +"""Batch convert recordings to spectrogram assets with resume and progress. + +Takes a folder of audio recordings and an output folder. For each recording, +runs the BatBot pipeline (same conversion as bats_ai.core.tasks.recording_compute_spectrogram), +saves all output images into a per-recording subfolder, and writes a results JSON +including the recording filename. Supports resume by skipping recordings that +already have results in the output folder. 
+""" + +# BATBOT HAS A CLI COMMAND CALLED PREPROCESS THAT DOES BATCH PROCESSING OF A FOLDER +# Had an option to generate the metadata +from __future__ import annotations + +from concurrent.futures import ProcessPoolExecutor, as_completed +from datetime import datetime +import json +import logging +import os +from pathlib import Path +import re +import shutil +import tempfile +from typing import Any + +import batbot +import click +import cv2 +from guano import GuanoFile +import numpy as np +from pydantic import BaseModel, ConfigDict, Field, field_validator +from scipy import interpolate +from scipy.ndimage import gaussian_filter1d +from skimage import measure +from skimage.filters import threshold_multiotsu + +# Suppress batbot's logging so progress output stays clean +logging.getLogger('batbot').setLevel(logging.WARNING) +# disable tqdm progress bars +os.environ['TQDM_DISABLE'] = '1' + + +def _parse_guano_datetime(datetime_str: str | None) -> datetime | None: + """Parse datetime string from GUANO (same logic as bats_ai.core.utils.guano_utils).""" + if not datetime_str: + return None + try: + return datetime.strptime(datetime_str, '%Y%m%dT%H%M%S') + except ValueError: + try: + return datetime.fromisoformat(datetime_str) + except ValueError: + return None + + +def _extract_metadata_from_filename(filename: str) -> dict[str, Any]: + stem = Path(filename).stem + match = re.match(r'^(\d+)_(.+)_(\d{8})_(\d{6})(?:_(.*))?$', stem) + if not match: + return {} + cell_id, label_name, date_str, timestamp_str = ( + match.group(1), + match.group(2), + match.group(3), + match.group(4), + ) + out: dict[str, Any] = {} + if cell_id: + try: + out['nabat_grid_cell_grts_id'] = str(int(cell_id)) + except ValueError: + pass + if date_str and len(date_str) == 8 and timestamp_str and len(timestamp_str) == 6: + try: + out['nabat_activation_start_time'] = datetime( + int(date_str[0:4]), + int(date_str[4:6]), + int(date_str[6:8]), + int(timestamp_str[0:2]), + int(timestamp_str[2:4]), + 
int(timestamp_str[4:6]), + ) + except (ValueError, IndexError): + pass + if label_name and label_name.upper() in ('SW', 'NE', 'NW', 'SE'): + out['quadrant'] = label_name.upper() + return out + + +def extract_guano_metadata( + recording_path: Path, check_filename: bool = True +) -> dict[str, Any] | None: + try: + gfile = GuanoFile(str(recording_path)) + except Exception: + return None + + nabat_fields: dict[str, Any] = { + 'nabat_grid_cell_grts_id': gfile.get('NABat|Grid Cell GRTS ID', None), + 'nabat_latitude': gfile.get('NABat|Latitude', None), + 'nabat_longitude': gfile.get('NABat|Longitude', None), + 'nabat_site_name': gfile.get('NABat|Site Name', None), + } + if nabat_fields['nabat_longitude'] is not None: + try: + lon = float(nabat_fields['nabat_longitude']) + nabat_fields['nabat_longitude'] = lon * -1 if lon > 0 else lon + except (ValueError, TypeError): + nabat_fields['nabat_longitude'] = None + if nabat_fields['nabat_latitude'] is not None: + try: + nabat_fields['nabat_latitude'] = float(nabat_fields['nabat_latitude']) + except (ValueError, TypeError): + nabat_fields['nabat_latitude'] = None + + start_t = ( + _parse_guano_datetime(gfile.get('NABat|Activation start time', None)) + if 'NABat|Activation start time' in gfile + else None + ) + end_t = ( + _parse_guano_datetime(gfile.get('NABat|Activation end time', None)) + if 'NABat|Activation end time' in gfile + else None + ) + species_raw = gfile.get('NABat|Species List', '') + additional: dict[str, Any] = { + 'nabat_activation_start_time': start_t.isoformat() if start_t else None, + 'nabat_activation_end_time': end_t.isoformat() if end_t else None, + 'nabat_software_type': gfile.get('NABat|Software type', None), + 'nabat_species_list': ( + [s.strip() for s in species_raw.split(',') if s.strip()] if species_raw else None + ), + 'nabat_comments': gfile.get('NABat|Comments', None), + 'nabat_detector_type': gfile.get('NABat|Detector type', None), + 'nabat_unusual_occurrences': gfile.get('NABat|Unusual 
occurrences', '') or None, + } + metadata = {**nabat_fields, **additional} + + if check_filename: + filename_meta = _extract_metadata_from_filename(recording_path.name) + if not metadata.get('nabat_grid_cell_grts_id') and filename_meta.get( + 'nabat_grid_cell_grts_id' + ): + metadata['nabat_grid_cell_grts_id'] = filename_meta['nabat_grid_cell_grts_id'] + if not metadata.get('nabat_activation_start_time') and filename_meta.get( + 'nabat_activation_start_time' + ): + metadata['nabat_activation_start_time'] = filename_meta[ + 'nabat_activation_start_time' + ].isoformat() + if filename_meta.get('quadrant'): + metadata['quadrant'] = filename_meta['quadrant'] + + has_any = any(v is not None for v in metadata.values()) + return metadata if has_any else None + + +# --------------------------------------------------------------------------- +# BatBot metadata models (mirrors bats_ai.core.utils.batbot_metadata) +# --------------------------------------------------------------------------- + + +class SpectrogramMetadata(BaseModel): + uncompressed_path: list[str] = Field(alias='uncompressed.path') + compressed_path: list[str] = Field(alias='compressed.path') + mask_path: list[str] = Field(alias='mask.path') + + +class UncompressedSize(BaseModel): + width_px: int = Field(alias='width.px') + height_px: int = Field(alias='height.px') + + +class CompressedSize(BaseModel): + width_px: int = Field(alias='width.px') + height_px: int = Field(alias='height.px') + + +class SizeMetadata(BaseModel): + uncompressed: UncompressedSize + compressed: CompressedSize + + +class FrequencyMetadata(BaseModel): + min_hz: int = Field(alias='min.hz') + max_hz: int = Field(alias='max.hz') + pixels_hz: list[int] = Field(alias='pixels.hz') + + +class Segment(BaseModel): + curve_hz_ms: list[list[float]] = Field(alias='curve.(hz,ms)') + start_ms: float = Field(alias='segment start.ms') + end_ms: float = Field(alias='segment end.ms') + duration_ms: float = Field(alias='segment duration.ms') + 
contour_start_ms: float = Field(alias='contour start.ms') + contour_end_ms: float = Field(alias='contour end.ms') + contour_duration_ms: float = Field(alias='contour duration.ms') + threshold_amp: int = Field(alias='threshold.amp') + peak_f_ms: float | None = Field(None, alias='peak f.ms') + fc_ms: float | None = Field(None, alias='fc.ms') + hi_fc_knee_ms: float | None = Field(None, alias='hi fc:knee.ms') + lo_fc_heel_ms: float | None = Field(None, alias='lo fc:heel.ms') + bandwidth_hz: int | None = Field(None, alias='bandwidth.hz') + hi_f_hz: int | None = Field(None, alias='hi f.hz') + lo_f_hz: int | None = Field(None, alias='lo f.hz') + peak_f_hz: int | None = Field(None, alias='peak f.hz') + fc_hz: int | None = Field(None, alias='fc.hz') + hi_fc_knee_hz: int | None = Field(None, alias='hi fc:knee.hz') + lo_fc_heel_hz: int | None = Field(None, alias='lo fc:heel.hz') + harmonic_flag: bool = Field(False, alias='harmonic.flag') + harmonic_peak_f_ms: float | None = Field(None, alias='harmonic peak f.ms') + harmonic_peak_f_hz: int | None = Field(None, alias='harmonic peak f.hz') + echo_flag: bool = Field(False, alias='echo.flag') + echo_peak_f_ms: float | None = Field(None, alias='echo peak f.ms') + echo_peak_f_hz: int | None = Field(None, alias='echo peak f.hz') + slope_at_hi_fc_knee_khz_per_ms: float | None = Field(None, alias='slope@hi fc:knee.khz/ms') + slope_at_fc_khz_per_ms: float | None = Field(None, alias='slope@fc.khz/ms') + slope_at_low_fc_heel_khz_per_ms: float | None = Field(None, alias='slope@low fc:heel.khz/ms') + slope_at_peak_khz_per_ms: float | None = Field(None, alias='slope@peak.khz/ms') + slope_avg_khz_per_ms: float | None = Field(None, alias='slope[avg].khz/ms') + slope_hi_avg_khz_per_ms: float | None = Field(None, alias='slope/hi[avg].khz/ms') + slope_mid_avg_khz_per_ms: float | None = Field(None, alias='slope/mid[avg].khz/ms') + slope_lo_avg_khz_per_ms: float | None = Field(None, alias='slope/lo[avg].khz/ms') + slope_box_khz_per_ms: float | None 
= Field(None, alias='slope[box].khz/ms') + slope_hi_box_khz_per_ms: float | None = Field(None, alias='slope/hi[box].khz/ms') + slope_mid_box_khz_per_ms: float | None = Field(None, alias='slope/mid[box].khz/ms') + slope_lo_box_khz_per_ms: float | None = Field(None, alias='slope/lo[box].khz/ms') + + @field_validator('curve_hz_ms', mode='before') + @classmethod + def validate_curve(cls, v: Any) -> list[list[float]]: + if isinstance(v, list): + return v + return [] + + +class BatbotMetadata(BaseModel): + model_config = ConfigDict(validate_by_name=True, validate_by_alias=True) + wav_path: str = Field(alias='wav.path') + spectrogram: SpectrogramMetadata + global_threshold_amp: int = Field(alias='global_threshold.amp') + sr_hz: int = Field(alias='sr.hz') + duration_ms: float = Field(alias='duration.ms') + frequencies: FrequencyMetadata + size: SizeMetadata + segments: list[Segment] + + +class CompressedSpectrogramData(BaseModel): + starts: list[float] + stops: list[float] + widths: list[float] + + +def parse_batbot_metadata(file_path: str | Path) -> BatbotMetadata: + with open(file_path) as f: + data = json.load(f) + return BatbotMetadata(**data) + + +def convert_to_compressed_spectrogram_data(metadata: BatbotMetadata) -> CompressedSpectrogramData: + duration_ms = metadata.duration_ms + starts_ms: list[float] = [] + stops_ms: list[float] = [] + widths_px_compressed: list[float] = [] + segment_times: list[float] = [] + compressed_width = metadata.size.compressed.width_px + total_time = 0.0 + + if metadata.segments: + for segment in metadata.segments: + starts_ms.append(segment.start_ms) + stops_ms.append(segment.end_ms) + time = segment.end_ms - segment.start_ms + segment_times.append(time) + total_time += time + for time in segment_times: + width_px = (time / total_time) * compressed_width + widths_px_compressed.append(width_px) + else: + starts_ms = [0] + stops_ms = [duration_ms] + widths_px_compressed = [compressed_width] + + return CompressedSpectrogramData( + 
starts=starts_ms, + stops=stops_ms, + widths=widths_px_compressed, + ) + + +def convert_to_segment_data(metadata: BatbotMetadata) -> list[dict[str, Any]]: + segment_data: list[dict[str, Any]] = [] + for index, segment in enumerate(metadata.segments): + segment_data.append( + { + 'segment_index': index, + 'curve_hz_ms': segment.curve_hz_ms, + 'char_freq_ms': segment.fc_ms, + 'char_freq_hz': segment.fc_hz, + 'knee_ms': segment.hi_fc_knee_ms, + 'knee_hz': segment.hi_fc_knee_hz, + 'heel_ms': segment.lo_fc_heel_ms, + 'heel_hz': segment.lo_fc_heel_hz, + } + ) + return segment_data + + +# --------------------------------------------------------------------------- +# Contour extraction (mirrors bats_ai.core.utils.contour_utils) +# --------------------------------------------------------------------------- + + +def _auto_histogram_levels( + data: np.ndarray, + bins: int = 512, + smooth_sigma: float = 2.0, + variance_threshold: float = 400.0, + max_levels: int = 5, +) -> list[float]: + if data.size == 0: + return [] + hist, edges = np.histogram(data, bins=bins) + counts = gaussian_filter1d(hist.astype(np.float64), sigma=smooth_sigma) + centers = (edges[:-1] + edges[1:]) / 2.0 + mask = counts > 0 + counts = counts[mask] + centers = centers[mask] + if counts.size == 0: + return [] + groups = [] + current_centers = [] + current_weights = [] + for center, weight in zip(centers, counts): + weight = max(float(weight), 1e-9) + current_centers.append(center) + current_weights.append(weight) + values = np.array(current_centers, dtype=np.float64) + weights = np.array(current_weights, dtype=np.float64) + mean = np.average(values, weights=weights) + variance = np.average((values - mean) ** 2, weights=weights) + if variance > variance_threshold and len(current_centers) > 1: + last_center = current_centers.pop() + last_weight = current_weights.pop() + values = np.array(current_centers, dtype=np.float64) + weights = np.array(current_weights, dtype=np.float64) + if weights.sum() > 0: + 
groups.append(np.average(values, weights=weights)) + current_centers = [last_center] + current_weights = [last_weight] + if current_centers: + groups.append(np.average(current_centers, weights=current_weights)) + groups = sorted(set(groups)) + if len(groups) <= 1: + return groups + groups = groups[1:] + if max_levels and len(groups) > max_levels: + idx = np.linspace(0, len(groups) - 1, max_levels, dtype=int) + groups = [groups[i] for i in idx] + return groups + + +def _compute_auto_levels( + data: np.ndarray, + mode: str, + percentile_values: list[float], + multi_otsu_classes: int, + min_intensity: float, + hist_bins: int, + hist_sigma: float, + hist_variance_threshold: float, + hist_max_levels: int, +) -> list[float]: + valid = data[data >= min_intensity] + if valid.size == 0: + return [] + if mode == 'multi-otsu': + try: + return threshold_multiotsu(valid, classes=multi_otsu_classes).tolist() + except Exception: + pass + if mode == 'histogram': + return _auto_histogram_levels( + valid, + bins=hist_bins, + smooth_sigma=hist_sigma, + variance_threshold=hist_variance_threshold, + max_levels=hist_max_levels, + ) + return np.percentile(valid, sorted(percentile_values)).tolist() + + +def _polygon_area(points: np.ndarray) -> float: + if len(points) < 3: + return 0.0 + x, y = points[:, 0], points[:, 1] + return 0.5 * abs(np.dot(x, np.roll(y, -1)) - np.dot(y, np.roll(x, -1))) + + +def _smooth_contour_spline(contour: np.ndarray, smoothing_factor: float = 0.1) -> np.ndarray: + if not np.array_equal(contour[0], contour[-1]): + contour = np.vstack([contour, contour[0]]) + try: + tck, _ = interpolate.splprep( + [contour[:, 0], contour[:, 1]], + s=len(contour) * smoothing_factor, + per=True, + ) + alpha = np.linspace(0, 1, max(len(contour), 100)) + x, y = interpolate.splev(alpha, tck) + return np.column_stack([x, y]) + except Exception: + return contour + + +def _filter_contours_by_segment( + contours: list[tuple[np.ndarray, float]], + segment_boundaries: list[tuple[float, 
float]], +) -> list[list[tuple[np.ndarray, float]]]: + segment_contours: list[list[tuple[np.ndarray, float]]] = [[] for _ in segment_boundaries] + for contour, level in contours: + x_coords = contour[:, 0] + min_x = np.min(x_coords) + max_x = np.max(x_coords) + center_x = np.mean(x_coords) + for seg_idx, (seg_start, seg_end) in enumerate(segment_boundaries): + if (seg_start <= center_x < seg_end) or (min_x < seg_end and max_x > seg_start): + points_in_segment = np.sum((x_coords >= seg_start) & (x_coords < seg_end)) + total_points = len(x_coords) + if points_in_segment / total_points >= 0.5 or (seg_start <= center_x < seg_end): + segment_contours[seg_idx].append((contour, level)) + break + return segment_contours + + +def _extract_contours( + image_path: Path, + *, + levels_mode: str = 'percentile', + percentile_values: list[float] = (60, 70, 80, 90, 92, 94, 96, 98), + min_area: float = 30.0, + smoothing_factor: float = 0.08, + min_intensity: float = 1.0, + multi_otsu_classes: int = 4, + hist_bins: int = 512, + hist_sigma: float = 2.0, + hist_variance_threshold: float = 400.0, + hist_max_levels: int = 5, +) -> tuple[list[tuple[np.ndarray, float]], tuple[int, ...]]: + img = cv2.imread(str(image_path)) + if img is None: + raise RuntimeError(f'Could not read {image_path}') + gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + blurred = cv2.GaussianBlur(gray, (15, 15), 3) + data = blurred + levels = _compute_auto_levels( + data, + mode=levels_mode, + percentile_values=percentile_values, + multi_otsu_classes=multi_otsu_classes, + min_intensity=min_intensity, + hist_bins=hist_bins, + hist_sigma=hist_sigma, + hist_variance_threshold=hist_variance_threshold, + hist_max_levels=hist_max_levels, + ) + contours: list[tuple[np.ndarray, float]] = [] + for level in levels: + for c in measure.find_contours(data, level): + xy = c[:, ::-1] + if not np.array_equal(xy[0], xy[-1]): + xy = np.vstack([xy, xy[0]]) + if _polygon_area(xy) < min_area: + continue + smooth = 
_smooth_contour_spline(xy, smoothing_factor) + contours.append((smooth, level)) + return sorted(contours, key=lambda x: x[1]), img.shape + + +def process_spectrogram_assets_for_contours( + assets: dict[str, Any], + levels_mode: str = 'percentile', + percentile_values: list[float] = (60, 70, 80, 90, 92, 94, 96, 98), + min_area: float = 30.0, + smoothing_factor: float = 0.08, + min_intensity: float = 1.0, + multi_otsu_classes: int = 4, + hist_bins: int = 512, + hist_sigma: float = 2.0, + hist_variance_threshold: float = 400.0, + hist_max_levels: int = 5, +) -> dict[str, Any]: + compressed_data = assets.get('compressed', {}) + mask_paths = compressed_data.get('masks', []) + widths = compressed_data.get('widths', []) + height = compressed_data.get('height', 0) + starts = compressed_data.get('starts', []) + global_freq_min = assets.get('freq_min', 0) + global_freq_max = assets.get('freq_max', 0) + stops = compressed_data.get('stops', []) + all_segments_data: list[dict[str, Any]] = [] + processed_images: set[Path] = set() + for path_str in mask_paths: + img_path = Path(path_str).resolve() + if not img_path.exists(): + continue + if img_path in processed_images: + continue + processed_images.add(img_path) + contours, shape = _extract_contours( + img_path, + levels_mode=levels_mode, + percentile_values=percentile_values, + min_area=min_area, + smoothing_factor=smoothing_factor, + min_intensity=min_intensity, + multi_otsu_classes=multi_otsu_classes, + hist_bins=hist_bins, + hist_sigma=hist_sigma, + hist_variance_threshold=hist_variance_threshold, + hist_max_levels=hist_max_levels, + ) + segment_boundaries: list[tuple[float, float]] = [] + cumulative_x = 0.0 + for w in widths: + segment_boundaries.append((cumulative_x, cumulative_x + w)) + cumulative_x += w + segment_contours_list = _filter_contours_by_segment(contours, segment_boundaries) + segments_output: list[dict[str, Any]] = [] + width_to_this_seg = 0.0 + for seg_idx, seg_contours in enumerate(segment_contours_list): + 
all_y = ( + np.concatenate([c[:, 1] for c, _ in seg_contours]) if seg_contours else np.array([]) + ) + freq_min = float(np.min(all_y).round(3)) if all_y.size else None + freq_max = float(np.max(all_y).round(3)) if all_y.size else None + start_time = starts[seg_idx] if seg_idx < len(starts) else 0 + stop_time = stops[seg_idx] if seg_idx < len(stops) else 0 + width = widths[seg_idx] if seg_idx < len(widths) else 0 + time_per_pixel = (stop_time - start_time) / width if width else 0 + mhz_per_pixel = (global_freq_max - global_freq_min) / height if height else 0 + transformed_contours = [] + for contour, level in seg_contours: + new_curve = [ + [ + (point[0] - width_to_this_seg) * time_per_pixel + start_time, + global_freq_max - (point[1] * mhz_per_pixel), + ] + for point in contour + ] + transformed_contours.append( + { + 'level': float(level), + 'curve': new_curve, + 'index': seg_idx, + } + ) + segment_obj: dict[str, Any] = { + 'segment_index': seg_idx, + 'contour_count': len(seg_contours), + 'freq_min': freq_min, + 'freq_max': freq_max, + 'contours': transformed_contours, + } + if seg_idx < len(widths): + segment_obj['width_px'] = widths[seg_idx] + if seg_idx < len(starts): + segment_obj['start_ms'] = starts[seg_idx] + if seg_idx < len(stops): + segment_obj['stop_ms'] = stops[seg_idx] + width_to_this_seg += widths[seg_idx] if seg_idx < len(widths) else 0 + segments_output.append(segment_obj) + all_segments_data.extend(segments_output) + return { + 'segments': sorted(all_segments_data, key=lambda x: x.get('segment_index', 0)), + 'total_segments': len(all_segments_data), + } + + +def generate_spectrogram_assets(recording_path: str, output_folder: str) -> dict[str, Any]: + """Run BatBot pipeline and return result dict (paths are under output_folder).""" + batbot.pipeline(recording_path, output_folder=output_folder, quiet=True) + metadata_file = Path(recording_path).with_suffix('.metadata.json').name + metadata_path = Path(output_folder) / metadata_file + metadata = 
parse_batbot_metadata(metadata_path) + + uncompressed_paths = [str(p) for p in metadata.spectrogram.uncompressed_path] + compressed_paths = [str(p) for p in metadata.spectrogram.compressed_path] + mask_paths = [str(p) for p in metadata.spectrogram.mask_path] + + compressed_metadata = convert_to_compressed_spectrogram_data(metadata) + segment_curve_data = convert_to_segment_data(metadata) + + return { + 'duration': metadata.duration_ms, + 'freq_min': metadata.frequencies.min_hz, + 'freq_max': metadata.frequencies.max_hz, + 'normal': { + 'paths': uncompressed_paths, + 'width': metadata.size.uncompressed.width_px, + 'height': metadata.size.uncompressed.height_px, + }, + 'compressed': { + 'paths': compressed_paths, + 'masks': mask_paths, + 'width': metadata.size.compressed.width_px, + 'height': metadata.size.compressed.height_px, + 'widths': compressed_metadata.widths, + 'starts': compressed_metadata.starts, + 'stops': compressed_metadata.stops, + 'segments': segment_curve_data, + }, + } + + +# --------------------------------------------------------------------------- +# Batch conversion with resume +# --------------------------------------------------------------------------- + +RESULTS_FILENAME = 'results.json' +DEFAULT_AUDIO_GLOB = '*.wav' + + +def _result_path(output_folder: Path, recording_stem: str) -> Path: + return output_folder / recording_stem / RESULTS_FILENAME + + +def _already_done(output_folder: Path, recording_stem: str) -> bool: + return _result_path(output_folder, recording_stem).exists() + + +def _copy_batbot_output(tmpdir: Path, out_subdir: Path) -> None: + """Copy every file batbot wrote in tmpdir into out_subdir.""" + out_subdir.mkdir(parents=True, exist_ok=True) + for f in tmpdir.iterdir(): + if f.is_file(): + shutil.copy2(f, out_subdir / f.name) + + +def _result_with_basename_paths(result: dict[str, Any]) -> dict[str, Any]: + """Return a copy of result with all paths replaced by basenames.""" + out = json.loads(json.dumps(result)) + for key in 
('normal', 'compressed'): + if key not in out: + continue + for path_key in ('paths', 'masks'): + if path_key in out[key] and out[key][path_key]: + out[key][path_key] = [Path(p).name for p in out[key][path_key]] + return out + + +def process_one( + recording_path: Path, + output_folder: Path, +) -> dict[str, Any]: + """Convert one recording and save images + results.json. Returns result for this recording.""" + stem = recording_path.stem + out_subdir = output_folder / stem + out_subdir.mkdir(parents=True, exist_ok=True) + + with tempfile.TemporaryDirectory() as tmpdir: + tmpdir_p = Path(tmpdir) + temp_audio = tmpdir_p / recording_path.name + shutil.copy2(recording_path, temp_audio) + + result = generate_spectrogram_assets(str(temp_audio), str(tmpdir_p)) + + # Resolve paths so contour extraction can read mask images + for path_key in ('paths', 'masks'): + resolved = [] + for p in result['compressed'].get(path_key, []): + pp = Path(p) + if not pp.is_absolute(): + pp = tmpdir_p / pp.name + resolved.append(str(pp.resolve())) + result['compressed'][path_key] = resolved + + contours_data = process_spectrogram_assets_for_contours(result) + result['compressed']['contours'] = contours_data + + _copy_batbot_output(tmpdir_p, out_subdir) + + guano_meta = extract_guano_metadata(recording_path) + if guano_meta is not None: + result['guano'] = guano_meta + + result_for_json = _result_with_basename_paths(result) + payload = { + 'recording_filename': recording_path.name, + 'recording_stem': stem, + **result_for_json, + } + results_path = _result_path(output_folder, stem) + with open(results_path, 'w') as f: + json.dump(payload, f, indent=2) + + return payload + + +def gather_recordings(input_folder: Path, pattern: str) -> list[Path]: + return sorted(input_folder.glob(pattern)) + + +@click.command() +@click.argument( + 'input_folder', + type=click.Path(exists=True, file_okay=False, path_type=Path), +) +@click.argument( + 'output_folder', + type=click.Path(path_type=Path), +) 
+@click.option( + '--pattern', + default=DEFAULT_AUDIO_GLOB, + show_default=True, + help='Glob pattern for audio files (e.g. "*.wav", "*.WAV").', +) +@click.option( + '--no-resume', + is_flag=True, + help='Ignore existing results and reprocess all recordings.', +) +@click.option( + '-j', + '--jobs', + default=1, + type=click.IntRange(min=1), + show_default=True, + help='Number of recordings to process in parallel.', +) +def main( + input_folder: Path, + output_folder: Path, + pattern: str, + no_resume: bool, + jobs: int, +) -> None: + """Convert a folder of recordings to spectrogram assets with optional resume. + + For each recording in INPUT_FOLDER (matching --pattern), runs the BatBot + pipeline, writes images and a results JSON (including recording filename) + into OUTPUT_FOLDER/<stem>/. If OUTPUT_FOLDER/<stem>/results.json + already exists, that recording is skipped (resume). Use --no-resume to reprocess all. + Use --jobs N to process N recordings at a time. + """ + recordings = gather_recordings(input_folder, pattern) + if not recordings: + click.echo(f'No files matching "{pattern}" in {input_folder}', err=True) + raise SystemExit(1) + + output_folder.mkdir(parents=True, exist_ok=True) + to_do = [r for r in recordings if no_resume or not _already_done(output_folder, r.stem)] + skipped = len(recordings) - len(to_do) + + click.echo( + f'Recordings found: {len(recordings)}. ' + f'To process: {len(to_do)}. ' + f'Skipped (resume): {skipped}. ' + f'Jobs: {jobs}.'
+ ) + + if jobs == 1: + with click.progressbar( + to_do, + label='Converting', + show_pos=True, + show_percent=True, + ) as bar: + for recording_path in bar: + try: + process_one(recording_path, output_folder) + except Exception as e: + click.echo(f'\nError processing {recording_path}: {e}', err=True) + raise + else: + failed: list[tuple[Path, BaseException]] = [] + with click.progressbar( + length=len(to_do), + label='Converting', + show_pos=True, + show_percent=True, + ) as bar: + with ProcessPoolExecutor(max_workers=jobs) as executor: + future_to_path = { + executor.submit(process_one, recording_path, output_folder): recording_path + for recording_path in to_do + } + for future in as_completed(future_to_path): + recording_path = future_to_path[future] + try: + future.result() + except Exception as e: + failed.append((recording_path, e)) + bar.update(1) + if failed: + for path, e in failed: + click.echo(f'Error processing {path}: {e}', err=True) + raise SystemExit(1) + + click.echo(f'Done. 
Processed {len(to_do)} recordings into {output_folder}.') + + +if __name__ == '__main__': + main() diff --git a/uv.lock b/uv.lock index 95bd5e0e..2e367bc6 100644 --- a/uv.lock +++ b/uv.lock @@ -190,8 +190,8 @@ wheels = [ [[package]] name = "batbot" -version = "0.1.0" -source = { git = "https://github.com/Kitware/batbot#669f21cd0de908fe9264e8f4ea82314c6abc7821" } +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "cryptography" }, @@ -211,6 +211,10 @@ dependencies = [ { name = "sphinx-click" }, { name = "tqdm" }, ] +sdist = { url = "https://files.pythonhosted.org/packages/bc/41/f1702fa58ab8207e6b3f6059ee5e42e868c5b861059db1abafdc7786f390/batbot-0.1.2.tar.gz", hash = "sha256:92476da8ff3e55abc03c06ee9c2f72100b6efdbbf45e60c62612a71aa2b8c0c7", size = 3903360, upload-time = "2026-02-10T19:49:23.531Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/a0/216db39b5c6cfb4a00bb244419951830454b723c1d097038526f7dd5f840/batbot-0.1.2-py3-none-any.whl", hash = "sha256:56bc086784deb60af3f9649d0798ee7abb776116a5f24cb8bad1e643c9aa9c85", size = 35792, upload-time = "2026-02-10T19:49:22.017Z" }, +] [[package]] name = "bats-ai" @@ -301,7 +305,7 @@ type = [ [package.metadata] requires-dist = [ - { name = "batbot", git = "https://github.com/Kitware/batbot" }, + { name = "batbot" }, { name = "celery" }, { name = "django", extras = ["argon2"] }, { name = "django-allauth" }, @@ -2910,12 +2914,23 @@ wheels = [ [[package]] name = "pyastar2d" -version = "1.1.0" -source = { git = "https://github.com/bluemellophone/batbot-pyastar2d?rev=master#192535b43a50fc4e0f80d4d731fa799f3559354b" } +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "imageio" }, { name = "numpy" }, ] +sdist = { url = "https://files.pythonhosted.org/packages/9f/03/8333b690c13f245a10d9b6883b816c3fbb0721453b96a60c90b0648503da/pyastar2d-1.1.1.tar.gz", hash = 
"sha256:73b6375284851f53112aeb8c5c2abc81576d594928705ca76274360e67b0e6c0", size = 13446, upload-time = "2026-02-05T13:11:39.781Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/c5/4e7ea66138a72388805d238cd764ad8b4fbf67d8082edb645921b21686eb/pyastar2d-1.1.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:4f7a8b9924d042717acf6efc94251adf40410ca2a46913b01b9d8f396334e196", size = 12098, upload-time = "2026-02-05T13:11:29.095Z" }, + { url = "https://files.pythonhosted.org/packages/d3/f6/e4c48044964d643b5842de36142aec3f5743ec358a4fcffb7e2f1e3bf8e2/pyastar2d-1.1.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:bfb83a12bc6e9e809e837435448a9b8d649b80118eac094186e07dafdd18c194", size = 12567, upload-time = "2026-02-05T13:11:30.883Z" }, + { url = "https://files.pythonhosted.org/packages/9e/3d/2b5cb62b8c56a022bd3445ee0cc432a7026856c4ab1e5cac32e45cadcf62/pyastar2d-1.1.1-cp39-abi3-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cceaa19f7d5e4c68124ac5881950035fac183a6229ca1ae4251f1b5445b145d0", size = 56611, upload-time = "2026-02-05T13:11:32.166Z" }, + { url = "https://files.pythonhosted.org/packages/38/7b/c695c4d6c584012d534c0acb300b3f5c99685f8ad5a729f71cb86358762b/pyastar2d-1.1.1-cp39-abi3-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:460e3e308ee45010b2d810fdc92c1f45a50845a28cdb7a3546205b345aa3bd29", size = 56557, upload-time = "2026-02-05T13:11:33.093Z" }, + { url = "https://files.pythonhosted.org/packages/57/e5/27aa951b3beed307481ea45ec3f4e63d9d633624f72ec57feab1c4435269/pyastar2d-1.1.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5dd105ae437fa22245e19723e1ff8885ade43552adfc1f86bdd577194d352037", size = 1031746, upload-time = "2026-02-05T13:11:34.284Z" }, + { url = "https://files.pythonhosted.org/packages/d4/56/312569fa941ec295e4cc2db56e182967349289a5e822eff2c39895eea22f/pyastar2d-1.1.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e58e1379992a06e67eaf951231c1c6a925e729db3534d0def3e46a12384b5255", 
size = 1084083, upload-time = "2026-02-05T13:11:36.481Z" }, + { url = "https://files.pythonhosted.org/packages/e3/c3/970bb1248a9d72e6a6e4dacd0528d0d1ce64cc4b2a42c21d9f9c91f9f2d0/pyastar2d-1.1.1-cp39-abi3-win32.whl", hash = "sha256:c4957107bdb13a4018a66755614d12526a12201d563465aeb8d34ca49ca1d4bc", size = 14751, upload-time = "2026-02-05T13:11:37.804Z" }, + { url = "https://files.pythonhosted.org/packages/2f/e2/59181215e05a8414d4d269042bc0da61fd7e0c0dca2a22724dc79af7098e/pyastar2d-1.1.1-cp39-abi3-win_amd64.whl", hash = "sha256:f30b3ba6db094c4f4a9c8572c97b65884f6217971e4c75e48b683ee5c09bb0a2", size = 15328, upload-time = "2026-02-05T13:11:38.636Z" }, +] [[package]] name = "pycodestyle"