Add backwards compatibility checks for v1.1.0 release (#3489)
Summary:
X-link: facebookresearch/FBGEMM#570

Pull Request resolved: #3489

- Add backwards compatibility checks for v1.1.0 release

Reviewed By: spcyppt

Differential Revision: D66998092

fbshipit-source-id: d8fe7f2f5603c7c5d75fa55c240bc6538cf0ce11
q10 authored and facebook-github-bot committed Dec 10, 2024
1 parent bdb7095 commit 5dbeebf
Showing 3 changed files with 75 additions and 47 deletions.
30 changes: 0 additions & 30 deletions fbgemm_gpu/test/release/stable_ops.json

This file was deleted.

40 changes: 40 additions & 0 deletions fbgemm_gpu/test/release/stable_ops_v1.json
@@ -0,0 +1,40 @@
{
  "_description":"This is a dict containing schema of FBGEMM_GPU ops that are marked as stable. The schema of future releases need to be backward and forward compatible. For more details, please see https://docs.google.com/document/d/18I0lSkyHHqJ5BY30bx8YhpQHAMOg25nAFV2zeO8PIGk/edit#heading=h.y00l3f1ht5u1",
  "_majorversion":1,
  "releases":[
    {
      "version":"1.0.0",
      "api":{
        "torch.ops.fbgemm.jagged_to_padded_dense":"fbgemm::jagged_to_padded_dense(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value = 0) -> Tensor",
        "torch.ops.fbgemm.merge_pooled_embeddings":"fbgemm::merge_pooled_embeddings(Tensor[] pooled_embeddings, SymInt uncat_dim_size, Device target_device, SymInt cat_dim=1) -> Tensor",
        "torch.ops.fbgemm.permute_pooled_embs_auto_grad":"fbgemm::permute_pooled_embs_auto_grad(Tensor pooled_embs, Tensor offset_dim_list, Tensor permute_list, Tensor inv_offset_dim_list, Tensor inv_permute_list) -> Tensor",
        "torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf":"fbgemm::FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(Tensor input, int bit_rate) -> Tensor",
        "torch.ops.fbgemm.permute_2D_sparse_data":"fbgemm::permute_2D_sparse_data(Tensor permute, Tensor lengths, Tensor values, Tensor? weights=None, SymInt? permuted_lengths_sum=None) -> (Tensor, Tensor, Tensor?)",
        "torch.ops.fbgemm.permute_1D_sparse_data":"fbgemm::permute_1D_sparse_data(Tensor permute, Tensor lengths, Tensor values, Tensor? weights=None, SymInt? permuted_lengths_sum=None) -> (Tensor, Tensor, Tensor?)",
        "torch.ops.fbgemm.expand_into_jagged_permute":"fbgemm::expand_into_jagged_permute(Tensor permute, Tensor input_offset, Tensor output_offset, SymInt output_size) -> Tensor",
        "torch.ops.fbgemm.block_bucketize_sparse_features":"fbgemm::block_bucketize_sparse_features(Tensor lengths, Tensor indices, bool bucketize_pos, bool sequence, Tensor block_sizes, SymInt my_size, Tensor? weights=None, Tensor? batch_size_per_feature=None, SymInt max_B= -1, Tensor[]? block_bucketize_pos=None, bool keep_orig_idx=False) -> (Tensor, Tensor, Tensor?, Tensor?, Tensor?)",
        "torch.ops.fbgemm.asynchronous_complete_cumsum":"fbgemm::asynchronous_complete_cumsum(Tensor t_in) -> Tensor",
        "torch.ops.fbgemm.offsets_range":"fbgemm::offsets_range(Tensor offsets, SymInt range_size) -> Tensor",
        "torch.ops.fbgemm.segment_sum_csr":"fbgemm::segment_sum_csr(SymInt batch_size, Tensor csr_seg, Tensor values) -> Tensor",
        "torch.ops.fbgemm.keyed_jagged_index_select_dim1":"fbgemm::keyed_jagged_index_select_dim1(Tensor values, Tensor lengths, Tensor offsets, Tensor indices, SymInt batch_size, Tensor? weights=None, SymInt? selected_lengths_sum=None) -> Tensor[]"
      }
    },
    {
      "version":"1.1.0",
      "api":{
        "torch.ops.fbgemm.jagged_to_padded_dense":"fbgemm::jagged_to_padded_dense(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value = 0) -> Tensor",
        "torch.ops.fbgemm.merge_pooled_embeddings":"fbgemm::merge_pooled_embeddings(Tensor[] pooled_embeddings, SymInt uncat_dim_size, Device target_device, SymInt cat_dim=1) -> Tensor",
        "torch.ops.fbgemm.permute_pooled_embs_auto_grad":"fbgemm::permute_pooled_embs_auto_grad(Tensor pooled_embs, Tensor offset_dim_list, Tensor permute_list, Tensor inv_offset_dim_list, Tensor inv_permute_list) -> Tensor",
        "torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf":"fbgemm::FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(Tensor input, int bit_rate) -> Tensor",
        "torch.ops.fbgemm.permute_2D_sparse_data":"fbgemm::permute_2D_sparse_data(Tensor permute, Tensor lengths, Tensor values, Tensor? weights=None, SymInt? permuted_lengths_sum=None) -> (Tensor, Tensor, Tensor?)",
        "torch.ops.fbgemm.permute_1D_sparse_data":"fbgemm::permute_1D_sparse_data(Tensor permute, Tensor lengths, Tensor values, Tensor? weights=None, SymInt? permuted_lengths_sum=None) -> (Tensor, Tensor, Tensor?)",
        "torch.ops.fbgemm.expand_into_jagged_permute":"fbgemm::expand_into_jagged_permute(Tensor permute, Tensor input_offset, Tensor output_offset, SymInt output_size) -> Tensor",
        "torch.ops.fbgemm.block_bucketize_sparse_features":"fbgemm::block_bucketize_sparse_features(Tensor lengths, Tensor indices, bool bucketize_pos, bool sequence, Tensor block_sizes, SymInt my_size, Tensor? weights=None, Tensor? batch_size_per_feature=None, SymInt max_B= -1, Tensor[]? block_bucketize_pos=None, bool keep_orig_idx=False, Tensor? total_num_blocks=None) -> (Tensor, Tensor, Tensor?, Tensor?, Tensor?)",
        "torch.ops.fbgemm.asynchronous_complete_cumsum":"fbgemm::asynchronous_complete_cumsum(Tensor t_in) -> Tensor",
        "torch.ops.fbgemm.offsets_range":"fbgemm::offsets_range(Tensor offsets, SymInt range_size) -> Tensor",
        "torch.ops.fbgemm.segment_sum_csr":"fbgemm::segment_sum_csr(SymInt batch_size, Tensor csr_seg, Tensor values) -> Tensor",
        "torch.ops.fbgemm.keyed_jagged_index_select_dim1":"fbgemm::keyed_jagged_index_select_dim1(Tensor values, Tensor lengths, Tensor offsets, Tensor indices, SymInt batch_size, Tensor? weights=None, SymInt? selected_lengths_sum=None) -> Tensor[]"
      }
    }
  ]
}
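Between the "1.0.0" and "1.1.0" entries above, the only schema that changes is block_bucketize_sparse_features, which gains a trailing optional argument, Tensor? total_num_blocks=None. Appending an optional, defaulted argument is exactly the kind of change this file exists to police, since every old call site remains valid. A minimal illustrative sketch (not the repository's helper code) of how such a pair of reference strings can be compared with PyTorch's schema parser — torch._C.parse_schema and FunctionSchema.is_backward_compatible_with are existing PyTorch bindings:

import torch

# v1.0.0 reference schema, copied from the "1.0.0" entry above
old = torch._C.parse_schema(
    "fbgemm::block_bucketize_sparse_features(Tensor lengths, Tensor indices, bool bucketize_pos, bool sequence, Tensor block_sizes, SymInt my_size, Tensor? weights=None, Tensor? batch_size_per_feature=None, SymInt max_B= -1, Tensor[]? block_bucketize_pos=None, bool keep_orig_idx=False) -> (Tensor, Tensor, Tensor?, Tensor?, Tensor?)"
)

# v1.1.0 reference schema: identical except for the trailing optional
# total_num_blocks argument
new = torch._C.parse_schema(
    "fbgemm::block_bucketize_sparse_features(Tensor lengths, Tensor indices, bool bucketize_pos, bool sequence, Tensor block_sizes, SymInt my_size, Tensor? weights=None, Tensor? batch_size_per_feature=None, SymInt max_B= -1, Tensor[]? block_bucketize_pos=None, bool keep_orig_idx=False, Tensor? total_num_blocks=None) -> (Tensor, Tensor, Tensor?, Tensor?, Tensor?)"
)

# A new optional argument with a default keeps every v1.0.0 call site
# working, so the new schema is backward compatible with the old one.
assert new.is_backward_compatible_with(old)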
52 changes: 35 additions & 17 deletions fbgemm_gpu/test/release/stable_release_test.py
@@ -113,31 +113,49 @@ def check_schema_compatibility_from_op_name(
 
 
 class StableRelease(TestSuite):  # pyre-ignore[11]
-    def test_stable_schema(self) -> None:
+    def _test_stable_schema(self, version: str) -> None:
         """
         Test the schema compatibility of the operators against stable schema.
         This is to ensure that any changes to the ops' schema do not break compatibility of the stable versions.
         This test will fail if the current op schema is not forward or backward compatible with the stable schema.
         """
 
-        # Load stable ops from file into dict
-        stable_dict_file = open(
-            get_file_path_2("", os.path.dirname(__file__), "stable_ops.json")
+        majorversion = version.split(".")[0]
+        filepath = get_file_path_2(
+            "", os.path.dirname(__file__), f"stable_ops_v{majorversion}.json"
         )
-        stable_op_dict = json.load(stable_dict_file)["data"]
-        stable_dict_file.close()
-        # Get all op names
-        stable_op_names = set(stable_op_dict.keys())
+
+        # Load stable ops from file into dict
+        with open(filepath) as file:
+            for release_info in [
+                info
+                for info in json.load(file)["releases"]
+                if info["version"] == version
+            ]:
+                stable_op_dict = release_info["api"]
+
+                # Get all op names
+                stable_op_names = set(stable_op_dict.keys())
 
-        # Check compatibility for all ops that are marked stable
-        for full_op_name in stable_op_names:
-            # Test the schema given the op name
-            ref_schema_str = stable_op_dict[full_op_name]
-            op_name = full_op_name.split(".")[3]
-
-            check_schema_compatibility_from_op_name(
-                torch.ops.fbgemm, op_name, ref_schema_str
-            )
+                # Check compatibility for all ops that are marked stable
+                for full_op_name in stable_op_names:
+                    # Test the schema given the op name
+                    ref_schema_str = stable_op_dict[full_op_name]
+                    op_name = full_op_name.split(".")[3]
+
+                    check_schema_compatibility_from_op_name(
+                        torch.ops.fbgemm, op_name, ref_schema_str
+                    )
+
+    def test_backwards_compatibility(self) -> None:
+        """
+        Test the schema compatibility of the operators against previous versions of the API.
+        """
+        for version in ["1.0.0", "1.1.0"]:
+            try:
+                self._test_stable_schema(version)
+            except Exception as e:
+                self.fail(f"Compatibility test failed for version {version}: {e}")
 
     def test_example_ops(self) -> None:
         """
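The helper check_schema_compatibility_from_op_name named in the hunk header is defined earlier in stable_release_test.py and lies outside this diff. As a rough sketch only — the body below is an assumption, not the file's actual code, though torch._C.parse_schema, torch._C._jit_get_schemas_for_operator, and FunctionSchema.is_backward_compatible_with are existing PyTorch bindings — it presumably compares each op's currently registered schema against the stored reference string:

import torch

def check_schema_compatibility_from_op_name(namespace, op_name, ref_schema_str):
    # Parse the reference schema string recorded in stable_ops_v1.json.
    ref_schema = torch._C.parse_schema(ref_schema_str)
    # Collect every overload schema currently registered for the op
    # (namespace is torch.ops.fbgemm at the call site, whose .name is "fbgemm").
    cur_schemas = torch._C._jit_get_schemas_for_operator(f"{namespace.name}::{op_name}")
    # The op counts as stable if some registered overload still accepts
    # every call that was valid against the reference schema.
    assert any(
        s.is_backward_compatible_with(ref_schema) for s in cur_schemas
    ), f"{op_name} is not backward compatible with: {ref_schema_str}"

If the suite follows standard unittest collection, the new check can be exercised in isolation with something like python -m pytest fbgemm_gpu/test/release/stable_release_test.py -k backwards_compatibility (an assumed invocation; the repository's own test runner may differ).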
