Batch with annotations

Hello everyone,

I am currently facing an issue.
I have a dataset that is used by multiple projects. I would like to keep the annotations made in project A when I add the data rows to project B, using the Python API.

I have made some custom functions to handle the data, but I cannot seem to load the annotations like this.

Thank you.

def fetchDatasetDatarows(self, name:str = None, ID:str = None, sorted:bool = False) -> bool:
        """
        Fetch all data rows and their associated annotations from a Labelbox dataset.

        Parameters
        ----------
        name : str, optional
            The name of the dataset to fetch data rows from.
        ID : str, optional
            The ID of the dataset to fetch data rows from.
        sorted : bool, optional
            Sort the data rows alphabetically using frame number.
            Careful when using for multi-patient datasets.

        Returns
        -------
        bool
            True if the export and data retrieval are successful, False otherwise.

        Behavior
        --------
        - Connects to the specified dataset by name or ID.
        - Sends an export request to the Labelbox server for the dataset.
        - Waits for the export task to complete.
        - Populates `self.dataset_datarows` with the fetched data rows and their annotations.
        - Prints errors if the export task fails or encounters issues.
        """
        # Resolve the dataset first when a name or ID is given explicitly.
        if name is not None or ID is not None:
            ret = self.create_dataset(files=None, name=name, ID=ID)
            if not ret:
                return False

        # Without a resolved dataset or dataset ID there is nothing to export.
        if self.datasetID is None and self.dataset is None:
            return False

        # Request the richest export payload so annotations travel with the rows.
        export_params = {
            "attachments": True,
            "metadata_fields": True,
            "data_row_details": True,
            "project_details": True,
            "label_details": True,
            "performance_details": True,
            "interpolated_frames": True
        }
        self._dataset_datarows = []

        if self.dataset is None:
            # BUG FIX: the dataset must be looked up by its own ID, not by the
            # project ID (the original passed self.projectID to get_dataset).
            self._dataset = self.client.get_dataset(self.datasetID)

        export_task = self.dataset.export(params=export_params, filters={})
        export_task.wait_till_done()

        if export_task.has_result():
            # Each streamed item carries one data row as a standalone JSON document.
            for output in export_task.get_stream():
                self.dataset_datarows.append(json.loads(output.json_str))

            if sorted:
                self._dataset_datarows = self.sort_datarows(self.dataset_datarows)

            return True

        if export_task.has_errors():
            # Plain string: the original f-string had no placeholders.
            print('Errors in task: ', export_task.errors)

        return False

    def create_project_batch(self, ids:list[str], name:str = None) -> bool:
        """
        Create a batch of data rows in the current project.

        Parameters
        ----------
        ids : list[str]
            List of data row IDs to include in the batch.
        name : str, optional
            Name prefix for the batch. Defaults to a timestamp-based prefix
            of the form ``YYYYmmddHHMMSS_batch`` when omitted.

        Returns
        -------
        bool
            True if the batch is created successfully, False otherwise.

        Behavior
        --------
        - Creates a batch in the current project with the specified data rows.
        - Waits for the batch creation task to complete.
        - Prints an error message and returns False if the task reports errors.
        """
        if name is None:
            # strftime already returns a str; no extra conversion needed.
            name = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + "_batch"

        # Lazily resolve the project handle on first use.
        if self.project is None:
            self._project = self.client.get_project(self.projectID)

        task = self.project.create_batches(
            name_prefix=name,
            data_rows=ids,
            priority=1
            )

        task.wait_till_done()
        if task.errors() is not None:
            # Fixed typo in the user-facing message ("occured" -> "occurred").
            print(f'[ERROR] Errors occurred when creating a batch `{name}` for project.')
            return False
        return True

If you are using the same ontology or at least the same tools then you can use this to import your annotations: How to: Clone a Project and Copy Data Rows and Labels in Labelbox - #2 by PT
If you have difficulties with this method could you provide error if any, the SDK version you are using and the destination project ID so we can look into it?

Thank you @PT .
I have a follow-up question: how do I find the queue.queue_type name of the available queues? For example, if I want to move something to the Initial Labeling task, how can I find the string in code that matches that queue?

Thanks.

The initial labeling queue means there is no label yet for a particular asset. So, de facto, if there are label(s) on an asset you cannot move it back to the initial labeling queue unless you erase them. For this you can use Label — Python SDK reference 7.2.0 documentation; the boolean parameter at the end determines whether to delete the label (False) or keep the existing one as a template (True).

 BulkDeletable._bulk_delete(labels, False)