Here is an example that uses data row ids as the data row identifier and maps each feature schema id in the origin project to the identical feature schema id in the destination project.
- You can retrieve the feature schema ids via `onto_normalized` (the ontology returned as JSON)
- You can retrieve data row ids (or global keys) from an export
"""Send data rows (with their labels) from a source Labelbox project to a
cloned destination project via send_to_annotate_from_catalog.

Requires a valid Labelbox API key and ids that exist in your workspace.
"""
import labelbox as lb
from labelbox.schema.conflict_resolution_strategy import ConflictResolutionStrategy

# Credentials / identifiers — fill in before running.
API_KEY = None  # NOTE(review): presumably None falls back to an env var — confirm
PROJECT_ID = 'clzscun5p07oh07338qo05497'

client = lb.Client(api_key=API_KEY)
project = client.get_project(PROJECT_ID)

# Clone the source project so the destination shares the same ontology.
clone_project = project.clone()

# Inspect the normalized ontology to locate feature schema ids.
project_ontology = project.ontology()
onto_normalized = client.get_ontology(project_ontology.uid).normalized
# The original line was a bare expression with no effect; print it so the
# feature schema id is actually visible to the person running the example.
print(onto_normalized['tools'][0]['featureSchemaId'])

# Given a 1:1 clone, map each source feature schema id to the identical
# feature schema id in the destination project.
annotation_ontology_mapping = {"clk8ru1f8099u07yoejo913vi": "clk8ru1f8099u07yoejo913vi"}

# Data row ids from the source project (retrieve them from an export).
data_row_ids = ['clz1cvi7w02i00734bx07tdui']

send_to_annotate_params = {
    "source_project_id": project.uid,
    "annotations_ontology_mapping": annotation_ontology_mapping,
    "exclude_data_rows_in_project": False,
    "override_existing_annotations_rule": ConflictResolutionStrategy.OverrideWithPredictions,
    "batch_priority": 5,
}

# Task queue to send the data rows to. If sent to the initial labeling queue,
# labels arrive as pre-labels; None sends them to the Done queue.
# queue_id = [queue.uid for queue in clone_project.task_queues() if queue.queue_type == "MANUAL_REVIEW_QUEUE"][0]

task = client.send_to_annotate_from_catalog(
    destination_project_id=clone_project.uid,
    task_queue_id=None,  # workflow task-queue id; None => Done queue
    batch_name="Prediction Import Demo Batch",
    data_rows=lb.DataRowIds(
        # List of DATA ROW IDS from the source project. (The original comment
        # said "global keys", but lb.DataRowIds expects data row ids; use
        # lb.GlobalKeys(...) if you have global keys instead.)
        data_row_ids
    ),
    params=send_to_annotate_params
)
task.wait_till_done()
print(f"Errors: {task.errors}")