diff --git a/.github/workflows/_run-e2e-single.yaml b/.github/workflows/_run-e2e-single.yaml index 97fa6b5705..d86da50d2d 100644 --- a/.github/workflows/_run-e2e-single.yaml +++ b/.github/workflows/_run-e2e-single.yaml @@ -1,3 +1,6 @@ +# WARNING: This file is used by multiple workflows. Please modify with caution. Any changes here may affect multiple +# CI/CD pipelines. + name: Run Single E2E Test with multiple Python versions on: @@ -13,6 +16,16 @@ on: required: true type: string description: JSON array of Python versions + ref: + required: false + type: string + description: Git ref to checkout + default: "" + artifact-name: + required: false + type: string + description: Docker image artifact name + default: "subtensor-localnet" jobs: run-e2e: @@ -28,6 +41,8 @@ jobs: steps: - name: Check-out repository uses: actions/checkout@v4 + with: + ref: ${{ inputs.ref != '' && inputs.ref || github.ref }} - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 @@ -57,10 +72,10 @@ jobs: - name: Download Cached Docker Image uses: actions/download-artifact@v4 with: - name: subtensor-localnet + name: ${{ inputs.artifact-name }} - name: Load Docker Image - run: docker load -i subtensor-localnet.tar + run: docker load -i ${{ inputs.artifact-name }}.tar - name: Run test with retry env: diff --git a/.github/workflows/e2e-subtensor-tests.yaml b/.github/workflows/e2e-subtensor-tests.yaml index b83308ad12..93e64e9826 100644 --- a/.github/workflows/e2e-subtensor-tests.yaml +++ b/.github/workflows/e2e-subtensor-tests.yaml @@ -205,7 +205,7 @@ jobs: - read-python-versions strategy: fail-fast: false - max-parallel: 16 + max-parallel: 64 matrix: include: ${{ fromJson(needs.find-tests.outputs.test-files) }} uses: ./.github/workflows/_run-e2e-single.yaml diff --git a/.github/workflows/nightly-e2e-tests-subtensor-main.yml b/.github/workflows/nightly-e2e-tests-subtensor-main.yml index 49b914871a..d8c8d44419 100644 --- 
a/.github/workflows/nightly-e2e-tests-subtensor-main.yml +++ b/.github/workflows/nightly-e2e-tests-subtensor-main.yml @@ -35,15 +35,59 @@ jobs: - name: Check-out repository under $GITHUB_WORKSPACE uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.10' + + - name: Install uv + uses: astral-sh/setup-uv@v4 + with: + enable-cache: false + cache-dependency-glob: '**/pyproject.toml' + ignore-nothing-to-cache: true + + - name: Cache uv and venv + uses: actions/cache@v4 + with: + path: | + ~/.cache/uv + .venv + key: uv-${{ runner.os }}-py3.10-${{ hashFiles('pyproject.toml') }} + restore-keys: uv-${{ runner.os }}-py3.10- + + - name: Install dependencies (faster if cache hit) + run: uv sync --extra dev --dev + - name: Find test files id: get-tests - run: | - test_files=$(find tests/e2e_tests -name "test*.py" | jq -R -s -c 'split("\n") | map(select(. != ""))') - # keep it here for future debug - # test_files=$(find tests/e2e_tests -type f -name "test*.py" | grep -E 'test_(hotkeys|staking)\.py$' | jq -R -s -c 'split("\n") | map(select(. != ""))') - echo "Found test files: $test_files" - echo "test-files=$test_files" >> "$GITHUB_OUTPUT" shell: bash + run: | + set -euo pipefail + test_matrix=$( + uv run pytest -q --collect-only tests/e2e_tests \ + | sed -n '/^e2e_tests\//p' \ + | sed 's|^|tests/|' \ + | jq -R -s -c ' + split("\n") + | map(select(. 
!= "")) + | map({nodeid: ., label: (sub("^tests/e2e_tests/"; ""))}) + ' + ) + echo "Found tests: $test_matrix" + echo "test-files=$test_matrix" >> "$GITHUB_OUTPUT" + + # Read Python versions + read-python-versions: + runs-on: ubuntu-latest + outputs: + python-versions: ${{ steps.read-versions.outputs.versions }} + steps: + - uses: actions/checkout@v4 + - id: read-versions + run: | + versions=$(cat .github/supported-python-versions.json) + echo "versions=$versions" >> $GITHUB_OUTPUT # Pull docker images (devnet-ready and main) pull-docker-images: @@ -77,343 +121,58 @@ jobs: name: subtensor-localnet-devnet-ready path: subtensor-localnet-devnet-ready.tar - # Determine the day for non-fast-blocks run - check-if-non-fast-blocks-run: - runs-on: ubuntu-latest - outputs: - non-fast-blocks-run: ${{ steps.check.outputs.non-fast-blocks-run }} - steps: - - id: check - run: | - if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then - echo "πŸ” Manual trigger detected. Forcing non-fast-blocks-run=true" - echo "non-fast-blocks-run=true" >> "$GITHUB_OUTPUT" - else - day=$(date -u +%u) - echo "Today is weekday $day" - if [ "$day" -ne 6 ]; then - echo "⏭️ Skipping: not Saturday" - echo "non-fast-blocks-run=false" >> "$GITHUB_OUTPUT" - exit 0 - fi - echo "βœ… It is Saturday" - echo "non-fast-blocks-run=true" >> "$GITHUB_OUTPUT" - fi - # Daily run of fast-blocks tests from `bittensor:master` based on `subtensor:main docker` image run-fast-blocks-e2e-test-master: - name: "FB master: ${{ matrix.test-file }} / Python ${{ matrix.python-version }}" + name: "master: ${{ matrix.label }}" needs: - find-tests - pull-docker-images - runs-on: ubuntu-latest - timeout-minutes: 25 - outputs: - failed: ${{ steps.test-failed.outputs.failed }} - + - read-python-versions strategy: - fail-fast: false # Allow other matrix jobs to run even if this job fails - max-parallel: 32 # Set the maximum number of parallel jobs (same as we have cores in ubuntu-latest runner) + fail-fast: false + 
max-parallel: 64 matrix: - os: - - ubuntu-latest - test-file: ${{ fromJson(needs.find-tests.outputs.test-files) }} - python-version: ${{ fromJson(vars.SDK_SUPPORTED_PYTHON_VERSIONS) }} - steps: - - name: Check-out repository - uses: actions/checkout@v4 - with: - ref: master - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - - name: Install uv - uses: astral-sh/setup-uv@v4 - - - name: install dependencies - run: uv sync --extra dev --dev - - - name: Download Cached Docker Image - uses: actions/download-artifact@v4 - with: - name: subtensor-localnet-main - - - name: Load Docker Image - run: docker load -i subtensor-localnet-main.tar - - - name: Run tests with retry - id: test-failed - env: - FAST_BLOCKS: "1" - LOCALNET_IMAGE_NAME: "ghcr.io/opentensor/subtensor-localnet:main" - run: | - set +e - for i in 1 2 3; do - echo "::group::πŸ” Test attempt $i" - uv run pytest ${{ matrix.test-file }} -s - status=$? - if [ $status -eq 0 ]; then - echo "βœ… Tests passed on attempt $i" - echo "::endgroup::" - echo "failed=false" >> "$GITHUB_OUTPUT" - break - else - echo "❌ Tests failed on attempt $i" - echo "::endgroup::" - if [ $i -eq 3 ]; then - echo "Tests failed after 3 attempts" - echo "failed=true" >> "$GITHUB_OUTPUT" - exit 1 - fi - echo "Retrying..." 
- sleep 5 - fi - done + include: ${{ fromJson(needs.find-tests.outputs.test-files) }} + uses: ./.github/workflows/_run-e2e-single.yaml + with: + nodeid: ${{ matrix.nodeid }} + image-name: ghcr.io/opentensor/subtensor-localnet:main + python-versions: ${{ needs.read-python-versions.outputs.python-versions }} + ref: master + artifact-name: subtensor-localnet-main + secrets: inherit # Daily run of fast-blocks tests from `bittensor:staging` based on `subtensor:devnet-ready` docker image run-fast-blocks-e2e-test-staging: - name: "FB staging: ${{ matrix.test-file }} / Python ${{ matrix.python-version }}" - needs: - - find-tests - - pull-docker-images - runs-on: ubuntu-latest - timeout-minutes: 25 - outputs: - failed: ${{ steps.test-failed.outputs.failed }} - - strategy: - fail-fast: false # Allow other matrix jobs to run even if this job fails - max-parallel: 32 # Set the maximum number of parallel jobs (same as we have cores in ubuntu-latest runner) - matrix: - os: - - ubuntu-latest - test-file: ${{ fromJson(needs.find-tests.outputs.test-files) }} - python-version: ${{ fromJson(vars.SDK_SUPPORTED_PYTHON_VERSIONS) }} - steps: - - name: Check-out repository - uses: actions/checkout@v4 - with: - ref: staging - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - - name: Install uv - uses: astral-sh/setup-uv@v4 - - - name: install dependencies - run: uv sync --extra dev --dev - - - name: Download Cached Docker Image - uses: actions/download-artifact@v4 - with: - name: subtensor-localnet-devnet-ready - - - name: Load Docker Image - run: docker load -i subtensor-localnet-devnet-ready.tar - - - name: Run tests with retry - id: test-failed - env: - FAST_BLOCKS: "1" - LOCALNET_IMAGE_NAME: "ghcr.io/opentensor/subtensor-localnet:devnet-ready" - run: | - set +e - for i in 1 2 3; do - echo "::group::πŸ” Test attempt $i" - uv run pytest ${{ matrix.test-file }} -s - status=$? 
- if [ $status -eq 0 ]; then - echo "βœ… Tests passed on attempt $i" - echo "::endgroup::" - echo "failed=false" >> "$GITHUB_OUTPUT" - break - else - echo "❌ Tests failed on attempt $i" - echo "::endgroup::" - if [ $i -eq 3 ]; then - echo "Tests failed after 3 attempts" - echo "failed=true" >> "$GITHUB_OUTPUT" - exit 1 - fi - echo "Retrying..." - sleep 5 - fi - done - - # Saturday run of non-fast-blocks tests from `bittensor:master` based on `subtensor:main` docker image - run-non-fast-blocks-e2e-test-master: - if: needs.check-if-non-fast-blocks-run.outputs.non-fast-blocks-run == 'true' - name: "NFB master: ${{ matrix.test-file }} / Python ${{ matrix.python-version }}" - needs: - - check-if-non-fast-blocks-run - - find-tests - - pull-docker-images - runs-on: ubuntu-latest - timeout-minutes: 1440 - outputs: - failed: ${{ steps.test-failed.outputs.failed }} - - strategy: - fail-fast: false # Allow other matrix jobs to run even if this job fails - max-parallel: 32 # Set the maximum number of parallel jobs (same as we have cores in ubuntu-latest runner) - matrix: - os: - - ubuntu-latest - test-file: ${{ fromJson(needs.find-tests.outputs.test-files) }} - python-version: ${{ fromJson(vars.SDK_SUPPORTED_PYTHON_VERSIONS) }} - - steps: - - name: Check-out repository - uses: actions/checkout@v4 - with: - ref: master - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - - name: Install uv - uses: astral-sh/setup-uv@v4 - - - name: install dependencies - run: uv sync --extra dev --dev - - - name: Download Cached Docker Image - uses: actions/download-artifact@v4 - with: - name: subtensor-localnet-main - - - name: Load Docker Image - run: docker load -i subtensor-localnet-main.tar - - - name: Run patched E2E tests - id: test-failed - env: - FAST_BLOCKS: "0" - LOCALNET_IMAGE_NAME: "ghcr.io/opentensor/subtensor-localnet:main" - run: | - set +e - for i in 1 2 3; do - echo "::group::πŸ” Test 
attempt $i" - uv run pytest ${{ matrix.test-file }} -s - status=$? - if [ $status -eq 0 ]; then - echo "βœ… Tests passed on attempt $i" - echo "::endgroup::" - echo "failed=false" >> "$GITHUB_OUTPUT" - break - else - echo "❌ Tests failed on attempt $i" - echo "::endgroup::" - if [ $i -eq 3 ]; then - echo "Tests failed after 3 attempts" - echo "failed=true" >> "$GITHUB_OUTPUT" - exit 1 - fi - echo "Retrying..." - sleep 5 - fi - done - - # Saturday run of non-fast-blocks tests from `bittensor:staging` based on `subtensor:devnet-ready` docker image - run-non-fast-blocks-e2e-test-staging: - if: needs.check-if-non-fast-blocks-run.outputs.non-fast-blocks-run == 'true' - name: "NFB staging: ${{ matrix.test-file }} / Python ${{ matrix.python-version }}" + name: "staging: ${{ matrix.label }}" needs: - - check-if-non-fast-blocks-run - find-tests - pull-docker-images - runs-on: ubuntu-latest - timeout-minutes: 1440 - outputs: - failed: ${{ steps.test-failed.outputs.failed }} - + - read-python-versions strategy: - fail-fast: false # Allow other matrix jobs to run even if this job fails - max-parallel: 32 # Set the maximum number of parallel jobs (same as we have cores in ubuntu-latest runner) + fail-fast: false + max-parallel: 64 matrix: - os: - - ubuntu-latest - test-file: ${{ fromJson(needs.find-tests.outputs.test-files) }} - python-version: ${{ fromJson(vars.SDK_SUPPORTED_PYTHON_VERSIONS) }} - - steps: - - name: Check-out repository - uses: actions/checkout@v4 - with: - ref: staging - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - - name: Install uv - uses: astral-sh/setup-uv@v4 - - - name: install dependencies - run: uv sync --extra dev --dev - - - name: Download Cached Docker Image - uses: actions/download-artifact@v4 - with: - name: subtensor-localnet-devnet-ready - - - name: Load Docker Image - run: docker load -i subtensor-localnet-devnet-ready.tar - - - name: Run patched 
E2E tests - id: test-failed - env: - FAST_BLOCKS: "0" - LOCALNET_IMAGE_NAME: "ghcr.io/opentensor/subtensor-localnet:devnet-ready" - run: | - set +e - for i in 1 2 3; do - echo "::group::πŸ” Test attempt $i" - uv run pytest ${{ matrix.test-file }} -s - status=$? - if [ $status -eq 0 ]; then - echo "βœ… Tests passed on attempt $i" - echo "::endgroup::" - echo "failed=false" >> "$GITHUB_OUTPUT" - break - else - echo "❌ Tests failed on attempt $i" - echo "::endgroup::" - if [ $i -eq 3 ]; then - echo "Tests failed after 3 attempts" - echo "failed=true" >> "$GITHUB_OUTPUT" - exit 1 - fi - echo "Retrying..." - sleep 5 - fi - done + include: ${{ fromJson(needs.find-tests.outputs.test-files) }} + uses: ./.github/workflows/_run-e2e-single.yaml + with: + nodeid: ${{ matrix.nodeid }} + image-name: ghcr.io/opentensor/subtensor-localnet:devnet-ready + python-versions: ${{ needs.read-python-versions.outputs.python-versions }} + ref: staging + artifact-name: subtensor-localnet-devnet-ready + secrets: inherit # Send centralized Discord failure notification notify-on-failure: needs: - run-fast-blocks-e2e-test-master - run-fast-blocks-e2e-test-staging - - run-non-fast-blocks-e2e-test-master - - run-non-fast-blocks-e2e-test-staging - if: | - needs.run-fast-blocks-e2e-test-master.outputs.failed == 'true' || - needs.run-fast-blocks-e2e-test-staging.outputs.failed == 'true' || - needs.run-non-fast-blocks-e2e-test-master.outputs.failed == 'true' || - needs.run-non-fast-blocks-e2e-test-staging.outputs.failed == 'true' + if: always() && (needs.run-fast-blocks-e2e-test-master.result == 'failure' || needs.run-fast-blocks-e2e-test-staging.result == 'failure') runs-on: ubuntu-latest steps: - name: Send centralized Discord failure notification run: | curl -X POST -H "Content-Type: application/json" \ - -d "{\"username\": \"Nightly CI\", \"content\": \"❌ Nightly E2E tests failed. 
Check run: <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}>\"}" \ + -d "{\"username\": \"SDK\", \"content\": \"❌ Nightly E2E tests failed. Check run: <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}>\"}" \ "${{ secrets.NIGHTLY_WEBHOOK_URL }}" diff --git a/.github/workflows/unit-and-integration-tests.yml b/.github/workflows/unit-and-integration-tests.yml index 507a48c098..a6a525981e 100644 --- a/.github/workflows/unit-and-integration-tests.yml +++ b/.github/workflows/unit-and-integration-tests.yml @@ -54,7 +54,7 @@ jobs: uv-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}- - name: Sync dev deps (idempotent; fast on cache hit) - run: uv sync --extra dev --dev + run: uv sync --extra dev --dev --extra torch - name: Unit tests timeout-minutes: 20 diff --git a/CHANGELOG.md b/CHANGELOG.md index 6cb3aadb51..4b36f0210f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 10.0.1 /2025-12-22 + +## What's Changed +* Improve GH workflows by @basfroman in https://github.com/opentensor/bittensor/pull/3207 +* Small but still important changes by @basfroman in https://github.com/opentensor/bittensor/pull/3210 +* Refactor neuron and metagraph by @basfroman in https://github.com/opentensor/bittensor/pull/3214 +* Fix docstrings by @basfroman in https://github.com/opentensor/bittensor/pull/3217 +* Fix nightly workflow by @basfroman in https://github.com/opentensor/bittensor/pull/3225 +* update contrib by @thewhaleking in https://github.com/opentensor/bittensor/pull/3209 +* MeV: fix for weird edge case by @basfroman in https://github.com/opentensor/bittensor/pull/3228 + +**Full Changelog**: https://github.com/opentensor/bittensor/compare/v10.0.0...v10.0.1 + ## 10.0.0 /2025-12-09 ## What's Changed diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py index 3207e908e3..6dab64e150 100644 --- a/bittensor/core/async_subtensor.py +++ 
b/bittensor/core/async_subtensor.py @@ -534,29 +534,6 @@ async def _query_with_fallback( Raises: ValueError: If no default value is provided, and none of the methods exist at the given block, a ValueError will be raised. - - Example: - - value = await self._query_with_fallback( - - # the first attempt will be made to SubtensorModule.MechanismEmissionSplit with params [1] - - ("SubtensorModule", "MechanismEmissionSplit", [1]), - - # if it does not exist at the given block, the next attempt will be made to - - # SubtensorModule.MechanismEmission with params None - - ("SubtensorModule", "MechanismEmission", None), - - block_hash="0x1234", - - # if none of the methods exist at the given block, the default value of None will be returned - - default_value=None, - - ) - """ if block_hash is None: block_hash = await self.substrate.get_chain_head() @@ -595,37 +572,6 @@ async def _runtime_call_with_fallback( Raises: ValueError: If no default value is provided, and none of the methods exist at the given block, a ValueError will be raised. 
- - Example: - - query = await self._runtime_call_with_fallback( - - # the first attempt will be made to SubnetInfoRuntimeApi.get_selective_mechagraph with the - - # given params - - ( - - "SubnetInfoRuntimeApi", - - "get_selective_mechagraph", - - [netuid, mechid, [f for f in range(len(SelectiveMetagraphIndex))]], - - ), - - # if it does not exist at the given block, the next attempt will be made as such: - - ("SubnetInfoRuntimeApi", "get_metagraph", [[netuid]]), - - block_hash=block_hash, - - # if none of the methods exist at the given block, the default value will be returned - - default_value=None, - - ) - """ if block_hash is None: block_hash = await self.substrate.get_chain_head() @@ -720,17 +666,11 @@ async def sim_swap( Example: # Simulate staking 100 TAO stake to subnet 1 - result = await subtensor.sim_swap( - origin_netuid=0, - destination_netuid=1, - amount=Balance.from_tao(100) - ) - print(f"Fee: {result.tao_fee.tao} TAO, Output: {result.alpha_amount} Alpha") Notes: @@ -1212,13 +1152,9 @@ async def bonds( Example: # Get bonds for subnet 1 - bonds = await subtensor.bonds(netuid=1) - print(bonds[0]) - # example output: (5, [(0, 32767), (1, 16383), (3, 8191)]) - # This means validator UID 5 has bonds: 50% to miner 0, 25% to miner 1, 12.5% to miner 3 Notes: @@ -1911,13 +1847,9 @@ async def get_children( Example: # Get children for a hotkey in subnet 1 - success, children, error = await subtensor.get_children(hotkey="5F...", netuid=1) - if success: - for proportion, child_hotkey in children: - print(f"Child {child_hotkey}: {proportion}") Notes: @@ -1970,7 +1902,6 @@ async def get_children_pending( Returns: tuple: A tuple containing: - - list[tuple[float, str]]: A list of children with their proportions. - int: The cool-down block number. @@ -2101,13 +2032,14 @@ async def get_crowdloan_constants( Otherwise, all known constants defined in `CrowdloanConstants.field_names()` are fetched. 
These constants define requirements and operational limits for crowdloan campaigns: - - - `AbsoluteMinimumContribution`: Minimum amount per contribution (TAO). - - `MaxContributors`: Maximum number of unique contributors per crowdloan. - - `MaximumBlockDuration`: Maximum duration (in blocks) for a crowdloan campaign (60 days = 432,000 blocks on production). - - `MinimumDeposit`: Minimum deposit required from the creator (TAO). - - `MinimumBlockDuration`: Minimum duration (in blocks) for a crowdloan campaign (7 days = 50,400 blocks on production). - - `RefundContributorsLimit`: Maximum number of contributors refunded per `refund_crowdloan` call (typically 50). + AbsoluteMinimumContribution: Minimum amount per contribution (TAO). + MaxContributors: Maximum number of unique contributors per crowdloan. + MaximumBlockDuration: Maximum duration (in blocks) for a crowdloan campaign (60 days = 432,000 blocks on + production). + MinimumDeposit: Minimum deposit required from the creator (TAO). + MinimumBlockDuration: Minimum duration (in blocks) for a crowdloan campaign (7 days = 50,400 blocks on + production). + RefundContributorsLimit: Maximum number of contributors refunded per `refund_crowdloan` call (typically 50). Parameters: constants: Specific constant names to query. If `None`, retrieves all constants from `CrowdloanConstants`. 
@@ -2976,24 +2908,18 @@ async def get_metagraph_info( Example: # Retrieve all fields from the metagraph from subnet 2 mechanism 0 - meta_info = subtensor.get_metagraph_info(netuid=2) # Retrieve all fields from the metagraph from subnet 2 mechanism 1 - meta_info = subtensor.get_metagraph_info(netuid=2, mechid=1) # Retrieve selective data from the metagraph from subnet 2 mechanism 0 - partial_meta_info = subtensor.get_metagraph_info( - netuid=2, - selected_indices=[SelectiveMetagraphIndex.Name, SelectiveMetagraphIndex.OwnerHotkeys] ) # Retrieve selective data from the metagraph from subnet 2 mechanism 1 - partial_meta_info = subtensor.get_metagraph_info( netuid=2, mechid=1, @@ -4935,13 +4861,13 @@ async def filter_netuids_by_registered_hotkeys( as long as they match filter_for_netuids. Parameters: - all_netuids (Iterable[int]): A list of netuids to consider for filtering. - filter_for_netuids (Iterable[int]): A subset of netuids to restrict the result to. If None/empty, returns + all_netuids: A list of netuids to consider for filtering. + filter_for_netuids: A subset of netuids to restrict the result to. If None/empty, returns all netuids with registered hotkeys. - all_hotkeys (Iterable[Wallet]): Hotkeys to check for registration. - block (Optional[int]): The blockchain block number for the query. - block_hash (Optional[str]): hash of the blockchain block number at which to perform the query. - reuse_block (bool): whether to reuse the last-used blockchain hash when retrieving info. + all_hotkeys: Hotkeys to check for registration. + block: The blockchain block number for the query. + block_hash: hash of the blockchain block number at which to perform the query. + reuse_block: whether to reuse the last-used blockchain hash when retrieving info. Returns: The filtered list of netuids (union of filtered all_netuids and registered hotkeys). @@ -5027,8 +4953,8 @@ async def is_in_admin_freeze_window( with validator weight submissions. 
Parameters: - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number for the query. + netuid: The unique identifier of the subnet. + block: The blockchain block number for the query. block_hash: The blockchain block_hash representation of the block id. reuse_block: Whether to reuse the last-used blockchain block hash. @@ -5272,8 +5198,8 @@ async def max_weight_limit( Returns: The stored maximum weight limit as a normalized float in [0, 1], or `None` if the subnetwork - does not exist. Note: this value is not actually enforced - the weight validation code uses - a hardcoded u16::MAX instead. + does not exist. Note: this value is not enforced - the weight validation code uses a hardcoded u16::MAX + instead. Notes: - This hyperparameter is now a constant rather than a settable variable. @@ -5743,7 +5669,6 @@ async def wait_for_block(self, block: Optional[int] = None) -> bool: Example: # Waits for a specific block - await subtensor.wait_for_block(block=1234) """ @@ -6098,19 +6023,12 @@ async def get_extrinsic_fee( Example: # Estimate fee before sending a transfer - call = await subtensor.compose_call( - call_module="Balances", - call_function="transfer", - call_params={"dest": destination_ss58, "value": amount.rao} - ) - fee = await subtensor.get_extrinsic_fee(call=call, keypair=wallet.coldkey) - print(f"Estimated fee: {fee.tao} TAO") Notes: @@ -6222,7 +6140,7 @@ async def add_liquidity( price_low: The lower bound of the price tick range. In TAO. price_high: The upper bound of the price tick range. In TAO. hotkey_ss58: The hotkey with staked TAO in Alpha. If not passed then the wallet hotkey is used. - mev_protection:` If` True, encrypts and submits the transaction through the MEV Shield pallet to protect + mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. 
The transaction remains encrypted in the mempool until validators decrypt and execute it. If `False`, submits the transaction directly without encryption. period: The number of blocks during which the transaction will remain valid after it's submitted. If @@ -6580,7 +6498,7 @@ async def commit_weights( mechid: The subnet mechanism unique identifier. version_key: Version key for compatibility with the network. max_attempts: The number of maximum attempts to commit weights. - mev_protection: `If` True, encrypts and submits the transaction through the MEV Shield pallet to protect + mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators decrypt and execute it. If `False`, submits the transaction directly without encryption. period: The number of blocks during which the transaction will remain valid after it's submitted. If @@ -7084,7 +7002,7 @@ async def mev_submit_encrypted( have successfully decrypted and executed the inner call. If True, the function will poll subsequent blocks for the event matching this submission's commitment. blocks_for_revealed_execution: Maximum number of blocks to poll for the executed event after - inclusion. The function checks blocks from start_block+1 to start_block + blocks_for_revealed_execution. + inclusion. The function checks blocks from start_block to start_block + blocks_for_revealed_execution. Returns immediately if the event is found before the block limit is reached. Returns: @@ -7143,7 +7061,7 @@ async def modify_liquidity( position_id: The id of the position record in the pool. liquidity_delta: The amount of liquidity to be added or removed (add if positive or remove if negative). hotkey_ss58: The hotkey with staked TAO in Alpha. If not passed then the wallet hotkey is used. 
- mev_protection:` If` True, encrypts and submits the transaction through the MEV Shield pallet to protect + mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators decrypt and execute it. If `False`, submits the transaction directly without encryption. period: The number of blocks during which the transaction will remain valid after it's submitted. If @@ -7158,45 +7076,27 @@ async def modify_liquidity( ExtrinsicResponse: The result object of the extrinsic execution. Example: - import bittensor as bt - subtensor = bt.AsyncSubtensor(network="local") - await subtensor.initialize() - my_wallet = bt.Wallet() # if liquidity_delta is negative - my_liquidity_delta = Balance.from_tao(100) * -1 - await subtensor.modify_liquidity( - wallet=my_wallet, - netuid=123, - position_id=2, - liquidity_delta=my_liquidity_delta - ) # if liquidity_delta is positive - my_liquidity_delta = Balance.from_tao(120) - await subtensor.modify_liquidity( - wallet=my_wallet, - netuid=123, - position_id=2, - liquidity_delta=my_liquidity_delta - ) Note: @@ -7302,7 +7202,7 @@ async def poke_deposit( Parameters: wallet: Bittensor wallet object (the account whose deposits will be adjusted). - mev_protection: `If` True, encrypts and submits the transaction through the MEV Shield pallet to protect + mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators decrypt and execute it. If `False`, submits the transaction directly without encryption. period: The number of blocks during which the transaction will remain valid after it's submitted. @@ -7759,7 +7659,7 @@ async def remove_liquidity( netuid: The UID of the target subnet for which the call is being initiated. 
position_id: The id of the position record in the pool. hotkey_ss58: The hotkey with staked TAO in Alpha. If not passed then the wallet hotkey is used. - mev_protection:` If` True, encrypts and submits the transaction through the MEV Shield pallet to protect + mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators decrypt and execute it. If `False`, submits the transaction directly without encryption. period: The number of blocks during which the transaction will remain valid after it's submitted. If @@ -8218,7 +8118,7 @@ async def set_delegate_take( wallet: bittensor wallet instance. hotkey_ss58: The `SS58` address of the neuron's hotkey. take: Percentage reward for the delegate. - mev_protection:` If` True, encrypts and submits the transaction through the MEV Shield pallet to protect + mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators decrypt and execute it. If `False`, submits the transaction directly without encryption. period: The number of blocks during which the transaction will remain valid after it's @@ -8364,7 +8264,7 @@ async def set_subnet_identity( netuid: The unique ID of the network on which the operation takes place. subnet_identity: The identity data of the subnet including attributes like name, GitHub repository, contact, URL, discord, description, and any additional metadata. - mev_protection:` If` True, encrypts and submits the transaction through the MEV Shield pallet to protect + mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators decrypt and execute it. 
If `False`, submits the transaction directly without encryption. period: The number of blocks during which the transaction will remain valid after it's @@ -8456,17 +8356,11 @@ async def set_weights( Example: # Set weights directly (for non-commit-reveal subnets) - response = await subtensor.set_weights( - wallet=wallet, - netuid=1, - uids=[0, 1, 2], - weights=[0.5, 0.3, 0.2] - ) # For commit-reveal subnets, the method automatically handles commit and reveal phases @@ -8870,7 +8764,6 @@ async def swap_stake( rate_tolerance: The maximum allowed increase in the price ratio between subnets (origin_price/destination_price). For example, 0.005 = 0.5% maximum increase. Only used when `safe_swapping` is `True`. - safe_staking is True. mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators decrypt and execute it. If `False`, submits the transaction directly without encryption. @@ -9003,7 +8896,7 @@ async def transfer( keep_alive: If `True`, ensures the source account maintains at least the existential deposit amount. If `False`, the transfer may reduce the balance below the existential deposit, potentially causing the account to be reaped. - mev_protection:` If` True, encrypts and submits the transaction through the MEV Shield pallet to protect + mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators decrypt and execute it. If `False`, submits the transaction directly without encryption. period: The number of blocks during which the transaction will remain valid after it's submitted. 
If the @@ -9219,59 +9112,34 @@ async def unstake_all( # value: import bittensor as bt - subtensor = bt.AsyncSubtensor() - wallet = bt.Wallet("my_wallet") - netuid = 14 - hotkey = "5%SOME_HOTKEY_WHERE_IS_YOUR_STAKE_NOW%" - wallet_stakes = await subtensor.get_stake_info_for_coldkey(coldkey_ss58=wallet.coldkey.ss58_address) - for stake in wallet_stakes: - result = await subtensor.unstake_all( - wallet=wallet, - hotkey_ss58=stake.hotkey_ss58, - netuid=stake.netuid, - ) - print(result) # If you would like to unstake all stakes in all subnets unsafely, use rate_tolerance=None: import bittensor as bt - subtensor = bt.AsyncSubtensor() - wallet = bt.Wallet("my_wallet") - netuid = 14 - hotkey = "5%SOME_HOTKEY_WHERE_IS_YOUR_STAKE_NOW%" - wallet_stakes = await subtensor.get_stake_info_for_coldkey(coldkey_ss58=wallet.coldkey.ss58_address) - for stake in wallet_stakes: - result = await subtensor.unstake_all( - wallet=wallet, - hotkey_ss58=stake.hotkey_ss58, - netuid=stake.netuid, - rate_tolerance=None, - ) - print(result) Notes: @@ -9604,11 +9472,9 @@ async def get_async_subtensor( Example: # Create and initialize in one step - subtensor = await get_async_subtensor(network="finney") # Ready to use immediately - block = await subtensor.get_current_block() """ diff --git a/bittensor/core/chain_data/neuron_info.py b/bittensor/core/chain_data/neuron_info.py index 54d99d7eb0..6c3b89293d 100644 --- a/bittensor/core/chain_data/neuron_info.py +++ b/bittensor/core/chain_data/neuron_info.py @@ -26,18 +26,15 @@ class NeuronInfo(InfoBase): stake: The balance staked to this neuron. stake_dict: A dictionary mapping coldkey to the amount staked. total_stake: The total amount of stake. - rank: The rank score of the neuron. emission: The emission rate. incentive: The incentive value. consensus: The consensus score. - trust: The trust score. validator_trust: The validation trust score. dividends: The dividends value. last_update: The timestamp of the last update. 
validator_permit: Validator permit status. weights: List of weights associated with the neuron. bonds: List of bonds associated with the neuron. - pruning_score: The pruning score of the neuron. prometheus_info: Information related to Prometheus. axon_info: Information related to Axon. is_null: Indicator if this is a null neuron. @@ -52,18 +49,15 @@ class NeuronInfo(InfoBase): # mapping of coldkey to amount staked to this Neuron stake_dict: dict[str, "Balance"] total_stake: "Balance" - rank: float emission: float incentive: float consensus: float - trust: float validator_trust: float dividends: float last_update: int validator_permit: bool weights: list[tuple[int, int]] bonds: list[list[int]] - pruning_score: int prometheus_info: Optional["PrometheusInfo"] = None axon_info: Optional["AxonInfo"] = None is_null: bool = False @@ -104,11 +98,9 @@ def get_null_neuron() -> "NeuronInfo": stake=Balance.from_rao(0), stake_dict={}, total_stake=Balance.from_rao(0), - rank=0, emission=0, incentive=0, consensus=0, - trust=0, validator_trust=0, dividends=0, last_update=0, @@ -120,7 +112,6 @@ def get_null_neuron() -> "NeuronInfo": is_null=True, coldkey="000000000000000000000000000000000000000000000000", hotkey="000000000000000000000000000000000000000000000000", - pruning_score=0, ) return neuron @@ -151,12 +142,9 @@ def _from_dict(cls, decoded: Any) -> "NeuronInfo": last_update=decoded["last_update"], netuid=decoded["netuid"], prometheus_info=PrometheusInfo.from_dict(decoded["prometheus_info"]), - pruning_score=decoded["pruning_score"], - rank=u16_normalized_float(decoded["rank"]), stake_dict=stake_dict, stake=total_stake, total_stake=total_stake, - trust=u16_normalized_float(decoded["trust"]), uid=decoded["uid"], validator_permit=decoded["validator_permit"], validator_trust=u16_normalized_float(decoded["validator_trust"]), diff --git a/bittensor/core/chain_data/neuron_info_lite.py b/bittensor/core/chain_data/neuron_info_lite.py index 5b6968a479..e1d8a33048 100644 --- 
a/bittensor/core/chain_data/neuron_info_lite.py +++ b/bittensor/core/chain_data/neuron_info_lite.py @@ -23,18 +23,15 @@ class NeuronInfoLite(InfoBase): stake: The stake amount associated with the neuron. stake_dict: Mapping of coldkey to the amount staked to this Neuron. total_stake: Total amount of the stake. - rank: The rank of the neuron. emission: The emission value of the neuron. incentive: The incentive value of the neuron. consensus: The consensus value of the neuron. - trust: Trust value of the neuron. validator_trust: Validator trust value of the neuron. dividends: Dividends associated with the neuron. last_update: Timestamp of the last update. validator_permit: Indicates if the neuron has a validator permit. prometheus_info: Prometheus information associated with the neuron. axon_info: Axon information associated with the neuron. - pruning_score: The pruning score of the neuron. is_null: Indicates whether the neuron is null. Methods: @@ -51,18 +48,15 @@ class NeuronInfoLite(InfoBase): # mapping of coldkey to amount staked to this Neuron stake_dict: dict[str, "Balance"] total_stake: "Balance" - rank: float emission: float incentive: float consensus: float - trust: float validator_trust: float dividends: float last_update: int validator_permit: bool prometheus_info: Optional["PrometheusInfo"] axon_info: Optional["AxonInfo"] - pruning_score: int is_null: bool = False @staticmethod @@ -75,11 +69,9 @@ def get_null_neuron() -> "NeuronInfoLite": stake=Balance.from_rao(0), stake_dict={}, total_stake=Balance.from_rao(0), - rank=0, emission=0, incentive=0, consensus=0, - trust=0, validator_trust=0, dividends=0, last_update=0, @@ -89,7 +81,6 @@ def get_null_neuron() -> "NeuronInfoLite": is_null=True, coldkey="000000000000000000000000000000000000000000000000", hotkey="000000000000000000000000000000000000000000000000", - pruning_score=0, ) return neuron @@ -119,12 +110,9 @@ def _from_dict(cls, decoded: Any) -> "NeuronInfoLite": last_update=decoded["last_update"], 
netuid=decoded["netuid"], prometheus_info=PrometheusInfo.from_dict(decoded["prometheus_info"]), - pruning_score=decoded["pruning_score"], - rank=u16_normalized_float(decoded["rank"]), stake_dict=stake_dict, stake=stake, total_stake=stake, - trust=u16_normalized_float(decoded["trust"]), uid=decoded["uid"], validator_permit=decoded["validator_permit"], validator_trust=u16_normalized_float(decoded["validator_trust"]), diff --git a/bittensor/core/extrinsics/asyncex/mev_shield.py b/bittensor/core/extrinsics/asyncex/mev_shield.py index a31fab1dae..8aa8dd7043 100644 --- a/bittensor/core/extrinsics/asyncex/mev_shield.py +++ b/bittensor/core/extrinsics/asyncex/mev_shield.py @@ -40,14 +40,14 @@ async def wait_for_extrinsic_by_hash( extrinsic_hash: The hash of the inner extrinsic to find. shield_id: The wrapper ID from EncryptedSubmitted event (for detecting decryption failures). submit_block_hash: Block hash where submit_encrypted was included. - timeout_blocks: Max blocks to wait (default 3). + timeout_blocks: Max blocks to wait. Returns: Optional ExtrinsicReceipt. """ starting_block = await subtensor.substrate.get_block_number(submit_block_hash) - current_block = starting_block + 1 + current_block = starting_block while current_block - starting_block <= timeout_blocks: logging.debug( @@ -127,7 +127,7 @@ async def submit_encrypted_extrinsic( successfully decrypted and executed the inner call. If True, the function will poll subsequent blocks for the event matching this submission's commitment. blocks_for_revealed_execution: Maximum number of blocks to poll for the executed event after inclusion. - The function checks blocks from start_block + 1 to start_block + blocks_for_revealed_execution. Returns + The function checks blocks from start_block to start_block + blocks_for_revealed_execution. Returns immediately if the event is found before the block limit is reached. 
Returns: diff --git a/bittensor/core/extrinsics/asyncex/proxy.py b/bittensor/core/extrinsics/asyncex/proxy.py index 0986216d9e..bb6848b0ea 100644 --- a/bittensor/core/extrinsics/asyncex/proxy.py +++ b/bittensor/core/extrinsics/asyncex/proxy.py @@ -466,28 +466,28 @@ async def kill_pure_proxy_extrinsic( Parameters: subtensor: Subtensor instance with the connection to the chain. - wallet: Bittensor wallet object. The ``wallet.coldkey.ss58_address`` must be the spawner of the pure proxy (the - account that created it via :meth:`create_pure_proxy_extrinsic`). The spawner must have an ``"Any"`` proxy + wallet: Bittensor wallet object. The `wallet.coldkey.ss58_address` must be the spawner of the pure proxy (the + account that created it via :meth:`create_pure_proxy_extrinsic`). The spawner must have an `Any` proxy relationship with the pure proxy. pure_proxy_ss58: The SS58 address of the pure proxy account to be killed. This is the address that was returned in the :meth:`create_pure_proxy_extrinsic` response. spawner: The SS58 address of the spawner account (the account that originally created the pure proxy via - :meth:`create_pure_proxy_extrinsic`). This should match ``wallet.coldkey.ss58_address``. + :meth:`create_pure_proxy_extrinsic`). This should match `wallet.coldkey.ss58_address`. proxy_type: The type of proxy permissions that were used when creating the pure proxy. This must match exactly - the ``proxy_type`` that was passed to :meth:`create_pure_proxy_extrinsic`. - index: The salt value (u16, range ``0-65535``) originally used in :meth:`create_pure_proxy_extrinsic` to generate - this pure proxy's address. This value, combined with ``proxy_type``, ``delay``, and ``spawner``, uniquely + the `proxy_type` that was passed to :meth:`create_pure_proxy_extrinsic`. + index: The salt value (u16, range `0-65535`) originally used in :meth:`create_pure_proxy_extrinsic` to generate + this pure proxy's address. 
This value, combined with `proxy_type`, `delay`, and `spawner`, uniquely identifies the pure proxy to be killed. Must match exactly the index used during creation. - height: The block number at which the pure proxy was created. This is returned in the ``"PureCreated"`` event from + height: The block number at which the pure proxy was created. This is returned in the `PureCreated` event from :meth:`create_pure_proxy_extrinsic` and is required to identify the exact creation transaction. ext_index: The extrinsic index within the block at which the pure proxy was created. This is returned in the - ``"PureCreated"`` event from :meth:`create_pure_proxy_extrinsic` and specifies the position of the creation - extrinsic within the block. Together with ``height``, this uniquely identifies the creation transaction. - force_proxy_type: The proxy type relationship to use when executing ``kill_pure`` through the proxy mechanism. + `PureCreated` event from :meth:`create_pure_proxy_extrinsic` and specifies the position of the creation + extrinsic within the block. Together with `height`, this uniquely identifies the creation transaction. + force_proxy_type: The proxy type relationship to use when executing `kill_pure` through the proxy mechanism. Since pure proxies are keyless and cannot sign transactions, the spawner must act as a proxy for the pure - proxy to execute ``kill_pure``. This parameter specifies which proxy type relationship between the spawner and - the pure proxy account should be used. The spawner must have a proxy relationship of this type (or ``"Any"``) - with the pure proxy account. Defaults to ``ProxyType.Any`` for maximum compatibility. If ``None``, Substrate + proxy to execute `kill_pure`. This parameter specifies which proxy type relationship between the spawner and + the pure proxy account should be used. The spawner must have a proxy relationship of this type (or `Any`) + with the pure proxy account. Defaults to `ProxyType.Any` for maximum compatibility. 
If `None`, Substrate will automatically select an available proxy type from the spawner's proxy relationships. mev_protection: If True, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators @@ -495,7 +495,7 @@ async def kill_pure_proxy_extrinsic( period: The number of blocks during which the transaction will remain valid after it's submitted. If the transaction is not included in a block within that number of blocks, it will expire and be rejected. You can think of it as an expiration date for the transaction. - raise_error: Raises a relevant exception rather than returning ``False`` if unsuccessful. + raise_error: Raises a relevant exception rather than returning `False` if unsuccessful. wait_for_inclusion: Whether to wait for the inclusion of the transaction. wait_for_finalization: Whether to wait for the finalization of the transaction. wait_for_revealed_execution: Whether to wait for the revealed execution of transaction if mev_protection used. @@ -504,10 +504,10 @@ async def kill_pure_proxy_extrinsic( ExtrinsicResponse: The result object of the extrinsic execution. Notes: - - The ``kill_pure`` call must be executed through the pure proxy account itself, with the spawner acting as a proxy. + - The `kill_pure` call must be executed through the pure proxy account itself, with the spawner acting as a proxy. This method automatically handles this by executing the call via :meth:`proxy_extrinsic`. By default, - ``force_proxy_type`` is set to ``ProxyType.Any``, meaning the spawner must have an ``"Any"`` proxy relationship - with the pure proxy. If you pass a different ``force_proxy_type``, the spawner must have that specific proxy + `force_proxy_type` is set to `ProxyType.Any`, meaning the spawner must have an `Any` proxy relationship + with the pure proxy. 
If you pass a different `force_proxy_type`, the spawner must have that specific proxy type relationship with the pure proxy. - See Pure Proxies: @@ -516,55 +516,31 @@ async def kill_pure_proxy_extrinsic( inaccessible after this operation. Example: - # After creating a pure proxy - create_response = subtensor.proxies.create_pure_proxy( - wallet=spawner_wallet, - proxy_type=ProxyType.Any, # Type of proxy permissions for the pure proxy - delay=0, - index=0, - ) - pure_proxy_ss58 = create_response.data["pure_account"] - spawner = create_response.data["spawner"] - proxy_type_used = create_response.data["proxy_type"] # The proxy_type used during creation - height = create_response.data["height"] - ext_index = create_response.data["ext_index"] # Kill the pure proxy - # Note: force_proxy_type defaults to ProxyType.Any (spawner must have Any proxy relationship) - kill_response = subtensor.proxies.kill_pure_proxy( - wallet=spawner_wallet, - pure_proxy_ss58=pure_proxy_ss58, - spawner=spawner, - proxy_type=proxy_type_used, # Must match the proxy_type used during creation - index=0, - height=height, - ext_index=ext_index, - # force_proxy_type=ProxyType.Any, # Optional: defaults to ProxyType.Any - ) - """ try: if not ( @@ -605,6 +581,7 @@ async def kill_pure_proxy_extrinsic( raise_error=raise_error, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, + wait_for_revealed_execution=wait_for_revealed_execution, ) if response.success: diff --git a/bittensor/core/extrinsics/asyncex/staking.py b/bittensor/core/extrinsics/asyncex/staking.py index 8ceaa4f98c..1ff60d8dae 100644 --- a/bittensor/core/extrinsics/asyncex/staking.py +++ b/bittensor/core/extrinsics/asyncex/staking.py @@ -376,6 +376,7 @@ async def add_stake_multiple_extrinsic( raise_error=raise_error, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, + wait_for_revealed_execution=wait_for_revealed_execution, ) data.update({(idx, hotkey_ss58, netuid): response}) 
diff --git a/bittensor/core/extrinsics/asyncex/sudo.py b/bittensor/core/extrinsics/asyncex/sudo.py index ec0bee9999..5a03d6e6d7 100644 --- a/bittensor/core/extrinsics/asyncex/sudo.py +++ b/bittensor/core/extrinsics/asyncex/sudo.py @@ -13,6 +13,7 @@ async def sudo_set_admin_freeze_window_extrinsic( subtensor: "AsyncSubtensor", wallet: "Wallet", window: int, + *, period: Optional[int] = None, raise_error: bool = False, wait_for_inclusion: bool = True, @@ -54,6 +55,7 @@ async def sudo_set_mechanism_count_extrinsic( wallet: "Wallet", netuid: int, mech_count: int, + *, period: Optional[int] = None, raise_error: bool = False, wait_for_inclusion: bool = True, @@ -96,6 +98,7 @@ async def sudo_set_mechanism_emission_split_extrinsic( wallet: "Wallet", netuid: int, maybe_split: MaybeSplit, + *, period: Optional[int] = None, raise_error: bool = False, wait_for_inclusion: bool = True, diff --git a/bittensor/core/extrinsics/asyncex/unstaking.py b/bittensor/core/extrinsics/asyncex/unstaking.py index 9ea41f9352..b442cbd0c6 100644 --- a/bittensor/core/extrinsics/asyncex/unstaking.py +++ b/bittensor/core/extrinsics/asyncex/unstaking.py @@ -451,6 +451,7 @@ async def unstake_multiple_extrinsic( raise_error=raise_error, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, + wait_for_revealed_execution=wait_for_revealed_execution, ) else: response = await unstake_extrinsic( diff --git a/bittensor/core/extrinsics/asyncex/utils.py b/bittensor/core/extrinsics/asyncex/utils.py index 3841bfc33d..803b797f58 100644 --- a/bittensor/core/extrinsics/asyncex/utils.py +++ b/bittensor/core/extrinsics/asyncex/utils.py @@ -17,6 +17,7 @@ async def sudo_call_extrinsic( sign_with: str = "coldkey", use_nonce: bool = False, nonce_key: str = "hotkey", + *, period: Optional[int] = None, raise_error: bool = False, wait_for_inclusion: bool = True, diff --git a/bittensor/core/extrinsics/mev_shield.py b/bittensor/core/extrinsics/mev_shield.py index 8f0ab3c854..a21a397270 100644 --- 
a/bittensor/core/extrinsics/mev_shield.py +++ b/bittensor/core/extrinsics/mev_shield.py @@ -39,14 +39,14 @@ def wait_for_extrinsic_by_hash( extrinsic_hash: The hash of the inner extrinsic to find. shield_id: The wrapper ID from EncryptedSubmitted event (for detecting decryption failures). submit_block_hash: Block hash where submit_encrypted was included. - timeout_blocks: Max blocks to wait (default 3). + timeout_blocks: Max blocks to wait. Returns: Optional ExtrinsicReceipt. """ starting_block = subtensor.substrate.get_block_number(submit_block_hash) - current_block = starting_block + 1 + current_block = starting_block while current_block - starting_block <= timeout_blocks: logging.debug( @@ -126,7 +126,7 @@ def submit_encrypted_extrinsic( successfully decrypted and executed the inner call. If True, the function will poll subsequent blocks for the event matching this submission's commitment. blocks_for_revealed_execution: Maximum number of blocks to poll for the executed event after inclusion. - The function checks blocks from start_block + 1 to start_block + blocks_for_revealed_execution. Returns + The function checks blocks from start_block to start_block + blocks_for_revealed_execution. Returns immediately if the event is found before the block limit is reached. 
Returns: diff --git a/bittensor/core/extrinsics/proxy.py b/bittensor/core/extrinsics/proxy.py index be4cceae38..42766eb0e3 100644 --- a/bittensor/core/extrinsics/proxy.py +++ b/bittensor/core/extrinsics/proxy.py @@ -515,53 +515,31 @@ def kill_pure_proxy_extrinsic( Example: # After creating a pure proxy - create_response = subtensor.proxies.create_pure_proxy( - wallet=spawner_wallet, - proxy_type=ProxyType.Any, # Type of proxy permissions for the pure proxy - delay=0, - index=0, - ) pure_proxy_ss58 = create_response.data["pure_account"] - spawner = create_response.data["spawner"] - proxy_type_used = create_response.data["proxy_type"] # The proxy_type used during creation - height = create_response.data["height"] - ext_index = create_response.data["ext_index"] - # Kill the pure proxy - # Note: force_proxy_type defaults to ProxyType.Any (spawner must have Any proxy relationship) kill_response = subtensor.proxies.kill_pure_proxy( - wallet=spawner_wallet, - pure_proxy_ss58=pure_proxy_ss58, - spawner=spawner, - proxy_type=proxy_type_used, # Must match the proxy_type used during creation - index=0, - height=height, - ext_index=ext_index, - # force_proxy_type=ProxyType.Any, # Optional: defaults to ProxyType.Any - ) - """ try: if not ( @@ -602,6 +580,7 @@ def kill_pure_proxy_extrinsic( raise_error=raise_error, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, + wait_for_revealed_execution=wait_for_revealed_execution, ) if response.success: diff --git a/bittensor/core/extrinsics/sudo.py b/bittensor/core/extrinsics/sudo.py index 0878245818..32d795287c 100644 --- a/bittensor/core/extrinsics/sudo.py +++ b/bittensor/core/extrinsics/sudo.py @@ -14,6 +14,7 @@ def sudo_set_admin_freeze_window_extrinsic( subtensor: "Subtensor", wallet: "Wallet", window: int, + *, period: Optional[int] = None, raise_error: bool = False, wait_for_inclusion: bool = True, @@ -55,6 +56,7 @@ def sudo_set_mechanism_count_extrinsic( wallet: "Wallet", netuid: int, 
mech_count: int, + *, period: Optional[int] = None, raise_error: bool = False, wait_for_inclusion: bool = True, @@ -97,6 +99,7 @@ def sudo_set_mechanism_emission_split_extrinsic( wallet: "Wallet", netuid: int, maybe_split: MaybeSplit, + *, period: Optional[int] = None, raise_error: bool = False, wait_for_inclusion: bool = True, diff --git a/bittensor/core/extrinsics/utils.py b/bittensor/core/extrinsics/utils.py index d8aa1a7094..fdeffdb9c8 100644 --- a/bittensor/core/extrinsics/utils.py +++ b/bittensor/core/extrinsics/utils.py @@ -100,6 +100,7 @@ def sudo_call_extrinsic( sign_with: str = "coldkey", use_nonce: bool = False, nonce_key: str = "hotkey", + *, period: Optional[int] = None, raise_error: bool = False, wait_for_inclusion: bool = True, @@ -240,9 +241,9 @@ def get_mev_commitment_and_ciphertext( Returns: A tuple containing: - - commitment_hex (str): Hex string of the Blake2-256 hash of payload_core (32 bytes). - - ciphertext (bytes): Encrypted blob containing plaintext. - - payload_core (bytes): Raw payload bytes before encryption. + - commitment_hex: Hex string of the Blake2-256 hash of payload_core (32 bytes). + - ciphertext: Encrypted blob containing plaintext. + - payload_core: Raw payload bytes before encryption. """ payload_core = signed_ext.data.data diff --git a/bittensor/core/metagraph.py b/bittensor/core/metagraph.py index 6e80bd24b3..633c139ea8 100644 --- a/bittensor/core/metagraph.py +++ b/bittensor/core/metagraph.py @@ -52,8 +52,6 @@ "n", "block", "stake", - "ranks", - "trust", "consensus", "validator_trust", "incentive", @@ -191,7 +189,6 @@ class MetagraphMixin(ABC): n (NDArray): The total number of neurons in the network, reflecting its size and complexity. block (NDArray): The current block number in the blockchain, crucial for synchronizing with the network's latest state. stake: Represents the cryptocurrency staked by neurons, impacting their influence and earnings within the network. - total_stake: The cumulative stake across all neurons. 
ranks: Neuron rankings as per the Yuma Consensus algorithm, influencing their incentive distribution and network authority. trust: Scores indicating the reliability of neurons, mainly miners, within the network's operational context. consensus: Scores reflecting each neuron's alignment with the network's collective decisions. @@ -322,20 +319,6 @@ def S(self) -> Tensor: """ return self.stake - @property - def R(self) -> Tensor: - """ - Contains the ranks of neurons in the Bittensor network. Ranks are determined by the network based - on each neuron's performance and contributions. Higher ranks typically indicate a greater level of - contribution or performance by a neuron. These ranks are crucial in determining the distribution of - incentives within the network, with higher-ranked neurons receiving more incentive. - - Returns: - Tensor: A tensor where each element represents the rank of a neuron. Higher values indicate higher ranks - within the network. - """ - return self.ranks - @property def I(self) -> Tensor: """ @@ -380,23 +363,6 @@ def C(self) -> Tensor: """ return self.consensus - @property - def T(self) -> Tensor: - """ - Represents the trust values assigned to each neuron in the Bittensor network. Trust is a key metric that - reflects the reliability and reputation of a neuron based on its past behavior and contributions. It is - an essential aspect of the network's functioning, influencing decision-making processes and interactions - between neurons. - - The trust matrix is inferred from the network's inter-peer weights, indicating the level of trust each neuron - has in others. A higher value in the trust matrix suggests a stronger trust relationship between neurons. - - Returns: - Tensor: A tensor of trust values, where each element represents the trust level of a neuron. Higher values - denote a higher level of trust within the network. 
- """ - return self.trust - @property def Tv(self) -> Tensor: """ @@ -633,8 +599,6 @@ def state_dict(self): "version": self.version, "n": self.n, "block": self.block, - "ranks": self.ranks, - "trust": self.trust, "consensus": self.consensus, "validator_trust": self.validator_trust, "incentive": self.incentive, @@ -762,10 +726,6 @@ def _set_metagraph_attributes(self, block: int): self.uids = self._create_tensor( [neuron.uid for neuron in self.neurons], dtype=self._dtype_registry["int64"] ) - self.trust = self._create_tensor( - [neuron.trust for neuron in self.neurons], - dtype=self._dtype_registry["float32"], - ) self.consensus = self._create_tensor( [neuron.consensus for neuron in self.neurons], dtype=self._dtype_registry["float32"], @@ -778,10 +738,6 @@ def _set_metagraph_attributes(self, block: int): [neuron.dividends for neuron in self.neurons], dtype=self._dtype_registry["float32"], ) - self.ranks = self._create_tensor( - [neuron.rank for neuron in self.neurons], - dtype=self._dtype_registry["float32"], - ) self.emission = self._create_tensor( [neuron.emission for neuron in self.neurons], dtype=self._dtype_registry["float32"], @@ -1060,7 +1016,7 @@ def __init__( metagraph = Metagraph(netuid=123, network="finney", lite=True, sync=True) """ BaseClass.__init__(self) - MetagraphMixin.__init__(self, netuid, network, lite, sync, subtensor, mechid) + MetagraphMixin.__init__(self, netuid, mechid, network, lite, sync, subtensor) self._dtype_registry = { "int64": torch.int64, "float32": torch.float32, @@ -1082,12 +1038,6 @@ def __init__( self.total_stake: torch.nn.Parameter = torch.nn.Parameter( torch.tensor([], dtype=torch.float32), requires_grad=False ) - self.ranks: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) - self.trust: torch.nn.Parameter = torch.nn.Parameter( - torch.tensor([], dtype=torch.float32), requires_grad=False - ) self.consensus: torch.nn.Parameter = torch.nn.Parameter( torch.tensor([], 
dtype=torch.float32), requires_grad=False ) @@ -1159,8 +1109,6 @@ def load_from_path(self, dir_path: str) -> "MetagraphMixin": self.total_stake = torch.nn.Parameter( state_dict["total_stake"], requires_grad=False ) - self.ranks = torch.nn.Parameter(state_dict["ranks"], requires_grad=False) - self.trust = torch.nn.Parameter(state_dict["trust"], requires_grad=False) self.consensus = torch.nn.Parameter( state_dict["consensus"], requires_grad=False ) @@ -1234,8 +1182,6 @@ def __init__( self.version = np.array([settings.version_as_int], dtype=np.int64) self.n = np.array([0], dtype=np.int64) self.block = np.array([0], dtype=np.int64) - self.ranks = np.array([], dtype=np.float32) - self.trust = np.array([], dtype=np.float32) self.consensus = np.array([], dtype=np.float32) self.validator_trust = np.array([], dtype=np.float32) self.incentive = np.array([], dtype=np.float32) @@ -1296,8 +1242,6 @@ def load_from_path(self, dir_path: str) -> "MetagraphMixin": self.block = state_dict["block"] self.uids = state_dict["uids"] self.stake = state_dict["stake"] - self.ranks = state_dict["ranks"] - self.trust = state_dict["trust"] self.consensus = state_dict["consensus"] self.validator_trust = state_dict["validator_trust"] self.incentive = state_dict["incentive"] diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 804d160bce..6862f3a1c4 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -190,7 +190,7 @@ class Subtensor(SubtensorMixin): - **Alpha**: Subnet-specific token representing some quantity of TAO staked into a subnet. - **Rao**: Smallest unit of TAO (1 TAO = 1e9 Rao) - Bittensor Glossary - - Wallets, Coldkeys and Hotkeys in Bittensor + - Wallets, Coldkeys, and Hotkeys in Bittensor """ @@ -428,29 +428,6 @@ def _query_with_fallback( Raises: ValueError: If no default value is provided, and none of the methods exist at the given block, a ValueError will be raised. 
- - Example: - - value = self._query_with_fallback( - - # the first attempt will be made to SubtensorModule.MechanismEmissionSplit with params [1] - - ("SubtensorModule", "MechanismEmissionSplit", [1]), - - # if it does not exist at the given block, the next attempt will be made to - - # SubtensorModule.MechanismEmission with params None - - ("SubtensorModule", "MechanismEmission", None), - - block_hash="0x1234", - - # if none of the methods exist at the given block, the default value of None will be returned - - default_value=None, - - ) - """ if block_hash is None: block_hash = self.substrate.get_chain_head() @@ -489,37 +466,6 @@ def _runtime_call_with_fallback( Raises: ValueError: If no default value is provided, and none of the methods exist at the given block, a ValueError will be raised. - - Example: - - query = self._runtime_call_with_fallback( - - # the first attempt will be made to SubnetInfoRuntimeApi.get_selective_mechagraph with the - - # given params - - ( - - "SubnetInfoRuntimeApi", - - "get_selective_mechagraph", - - [netuid, mechid, [f for f in range(len(SelectiveMetagraphIndex))]], - - ), - - # if it does not exist at the given block, the next attempt will be made as such: - - ("SubnetInfoRuntimeApi", "get_metagraph", [[netuid]]), - - block_hash=block_hash, - - # if none of the methods exist at the given block, the default value will be returned - - default_value=None, - - ) - """ if block_hash is None: block_hash = self.substrate.get_chain_head() @@ -612,17 +558,11 @@ def sim_swap( Example: # Simulate staking 100 TAO stake to subnet 1 - result = subtensor.sim_swap( - origin_netuid=0, - destination_netuid=1, - amount=Balance.from_tao(100) - ) - print(f"Fee: {result.tao_fee.tao} TAO, Output: {result.alpha_amount} Alpha") Notes: @@ -991,15 +931,10 @@ def bonds( Bond values are u16-normalized (0-65535, where 65535 = 1.0 or 100%). 
Example: - # Get bonds for subnet 1 - bonds = subtensor.bonds(netuid=1) - print(bonds[0]) - # example output: (5, [(0, 32767), (1, 16383), (3, 8191)]) - # This means validator UID 5 has bonds: 50% to miner 0, 25% to miner 1, 12.5% to miner 3 Notes: @@ -1546,13 +1481,9 @@ def get_children( Example: # Get children for a hotkey in subnet 1 - success, children, error = subtensor.get_children(hotkey="5F...", netuid=1) - if success: - for proportion, child_hotkey in children: - print(f"Child {child_hotkey}: {proportion}") Notes: @@ -1599,7 +1530,6 @@ def get_children_pending( Returns: tuple: A tuple containing: - - list[tuple[float, str]]: A list of children with their proportions. - int: The cool-down block number. @@ -1698,13 +1628,14 @@ def get_crowdloan_constants( Otherwise, all known constants defined in `CrowdloanConstants.field_names()` are fetched. These constants define requirements and operational limits for crowdloan campaigns: - - - `AbsoluteMinimumContribution`: Minimum amount per contribution (TAO). - - `MaxContributors`: Maximum number of unique contributors per crowdloan. - - `MaximumBlockDuration`: Maximum duration (in blocks) for a crowdloan campaign (60 days = 432,000 blocks on production). - - `MinimumDeposit`: Minimum deposit required from the creator (TAO). - - `MinimumBlockDuration`: Minimum duration (in blocks) for a crowdloan campaign (7 days = 50,400 blocks on production). - - `RefundContributorsLimit`: Maximum number of contributors refunded per `refund_crowdloan` call (typically 50). + AbsoluteMinimumContribution: Minimum amount per contribution (TAO). + MaxContributors: Maximum number of unique contributors per crowdloan. + MaximumBlockDuration: Maximum duration (in blocks) for a crowdloan campaign (60 days = 432,000 blocks on + production). + MinimumDeposit: Minimum deposit required from the creator (TAO). + MinimumBlockDuration: Minimum duration (in blocks) for a crowdloan campaign (7 days = 50,400 blocks on + production). 
+ RefundContributorsLimit: Maximum number of contributors refunded per `refund_crowdloan` call (typically 50). Parameters: constants: Specific constant names to query. If `None`, retrieves all constants from `CrowdloanConstants`. @@ -2437,26 +2368,18 @@ def get_metagraph_info( MetagraphInfo object with the requested subnet mechanism data, None if the subnet mechanism does not exist. Example: - # Retrieve all fields from the metagraph from subnet 2 mechanism 0 - meta_info = subtensor.get_metagraph_info(netuid=2) - # Retrieve all fields from the metagraph from subnet 2 mechanism 1 - meta_info = subtensor.get_metagraph_info(netuid=2, mechid=1) # Retrieve selective data from the metagraph from subnet 2 mechanism 0 - partial_meta_info = subtensor.get_metagraph_info( - netuid=2, - selected_indices=[SelectiveMetagraphIndex.Name, SelectiveMetagraphIndex.OwnerHotkeys] ) # Retrieve selective data from the metagraph from subnet 2 mechanism 1 - partial_meta_info = subtensor.get_metagraph_info( netuid=2, mechid=1, @@ -3409,7 +3332,7 @@ def get_stake( block=block, params=[hotkey_ss58, coldkey_ss58, netuid], ) - alpha_shares = cast(FixedPoint, alpha_shares_query) + alpha_shares = alpha_shares_query hotkey_alpha_obj: ScaleObj = self.query_module( module="SubtensorModule", @@ -3419,13 +3342,12 @@ def get_stake( ) hotkey_alpha = hotkey_alpha_obj.value - hotkey_shares_query = self.query_module( + hotkey_shares = self.query_module( module="SubtensorModule", name="TotalHotkeyShares", block=block, params=[hotkey_ss58, netuid], ) - hotkey_shares = cast(FixedPoint, hotkey_shares_query) alpha_shares_as_float = fixed_to_float(alpha_shares) hotkey_shares_as_float = fixed_to_float(hotkey_shares) @@ -4055,11 +3977,11 @@ def filter_netuids_by_registered_hotkeys( as long as they match filter_for_netuids. Parameters: - all_netuids (Iterable[int]): A list of netuids to consider for filtering. - filter_for_netuids (Iterable[int]): A subset of netuids to restrict the result to. 
If None/empty, returns - all netuids with registered hotkeys. - all_hotkeys (Iterable[Wallet]): Hotkeys to check for registration. - block (Optional[int]): The blockchain block number for the query. + all_netuids: A list of netuids to consider for filtering. + filter_for_netuids: A subset of netuids to restrict the result to. If None/empty, returns all netuids with + registered hotkeys. + all_hotkeys: Hotkeys to check for registration. + block: The blockchain block number for the query. Returns: The filtered list of netuids (union of filtered all_netuids and registered hotkeys). @@ -4130,8 +4052,8 @@ def is_in_admin_freeze_window( with validator weight submissions. Parameters: - netuid (int): The unique identifier of the subnet. - block (Optional[int]): The blockchain block number for the query. + netuid: The unique identifier of the subnet. + block: The blockchain block number for the query. Returns: bool: True if in freeze window, else False. @@ -4307,8 +4229,8 @@ def max_weight_limit( Returns: The stored maximum weight limit as a normalized float in [0, 1], or `None` if the subnetwork - does not exist. Note: this value is not actually enforced - the weight validation code uses - a hardcoded u16::MAX instead. + does not exist. Note: this value is not enforced - the weight validation code uses a hardcoded u16::MAX + instead. Notes: - This hyperparameter is now a constant rather than a settable variable. @@ -4494,14 +4416,11 @@ def query_identity( See the `Bittensor CLI documentation `_ for supported identity parameters. 
""" - identity_info = cast( - dict, - self.substrate.query( - module="SubtensorModule", - storage_function="IdentitiesV2", - params=[coldkey_ss58], - block_hash=self.determine_block_hash(block), - ), + identity_info = self.substrate.query( + module="SubtensorModule", + storage_function="IdentitiesV2", + params=[coldkey_ss58], + block_hash=self.determine_block_hash(block), ) if not identity_info: @@ -4969,21 +4888,13 @@ def get_extrinsic_fee( Balance object representing the extrinsic fee in Rao. Example: - # Estimate fee before sending a transfer - call = subtensor.compose_call( - call_module="Balances", - call_function="transfer", - call_params={"dest": destination_ss58, "value": amount.rao} - ) - fee = subtensor.get_extrinsic_fee(call=call, keypair=wallet.coldkey) - print(f"Estimated fee: {fee.tao} TAO") Notes: @@ -5796,7 +5707,7 @@ def finalize_crowdloan( - Only the creator can finalize. - Finalization requires `raised == cap` and `current_block >= end`. - For subnet leases, emissions are swapped to TAO and distributed to contributors' coldkeys during the lease. - - Leftover cap (after subnet lock + proxy deposit) is refunded to contributors pro-rata. + - Leftover cap (after subnet lock and proxy deposit) is refunded to contributors pro-rata. - Crowdloans Overview: - Crowdloan Tutorial: @@ -5936,7 +5847,7 @@ def mev_submit_encrypted( have successfully decrypted and executed the inner call. If True, the function will poll subsequent blocks for the event matching this submission's commitment. blocks_for_revealed_execution: Maximum number of blocks to poll for the executed event after inclusion. The - function checks blocks from start_block+1 to start_block + blocks_for_revealed_execution. Returns + function checks blocks from start_block to start_block + blocks_for_revealed_execution. Returns immediately if the event is found before the block limit is reached. 
Returns: @@ -6010,41 +5921,24 @@ def modify_liquidity( ExtrinsicResponse: The result object of the extrinsic execution. Example: - import bittensor as bt - subtensor = bt.subtensor(network="local") - my_wallet = bt.Wallet() - # if liquidity_delta is negative - my_liquidity_delta = Balance.from_tao(100) * -1 - subtensor.modify_liquidity( - wallet=my_wallet, - netuid=123, - position_id=2, - liquidity_delta=my_liquidity_delta - ) # if liquidity_delta is positive - my_liquidity_delta = Balance.from_tao(120) - subtensor.modify_liquidity( - wallet=my_wallet, - netuid=123, - position_id=2, - liquidity_delta=my_liquidity_delta ) @@ -6899,12 +6793,12 @@ def root_set_pending_childkey_cooldown( mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators decrypt and execute it. If `False`, submits the transaction directly without encryption. - period (Optional[int]): The number of blocks during which the transaction will remain valid after it's + period: The number of blocks during which the transaction will remain valid after it's submitted. If the transaction is not included in a block within that number of blocks, it will expire and be rejected. You can think of it as an expiration date for the transaction. raise_error: Raises a relevant exception rather than returning `False` if unsuccessful. - wait_for_inclusion (bool): Waits for the transaction to be included in a block. - wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. + wait_for_inclusion: Waits for the transaction to be included in a block. + wait_for_finalization: Waits for the transaction to be finalized on the blockchain. wait_for_revealed_execution: Whether to wait for the revealed execution of transaction if mev_protection used. 
Returns: @@ -7447,9 +7341,9 @@ def set_commitment( Parameters: - wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the data. - netuid (int): The unique identifier of the subnetwork. - data (str): The data to be committed to the network. + wallet: The wallet associated with the neuron committing the data. + netuid: The unique identifier of the subnetwork. + data: The data to be committed to the network. mev_protection: If `True`, encrypts and submits the transaction through the MEV Shield pallet to protect against front-running and MEV attacks. The transaction remains encrypted in the mempool until validators decrypt and execute it. If `False`, submits the transaction directly without encryption. @@ -7465,13 +7359,10 @@ def set_commitment( ExtrinsicResponse: The result object of the extrinsic execution. Example: - # Commit some data to subnet 1 - response = await subtensor.commit(wallet=my_wallet, netuid=1, data="Hello Bittensor!") # Commit with custom period - response = await subtensor.commit(wallet=my_wallet, netuid=1, data="Model update v2.0", period=100) Note: See @@ -7951,63 +7842,35 @@ def unstake_all( ExtrinsicResponse: The result object of the extrinsic execution. 
Example: - # If you would like to unstake all stakes in all subnets safely: - import bittensor as bt - subtensor = bt.Subtensor() - wallet = bt.Wallet("my_wallet") - netuid = 14 - hotkey = "5%SOME_HOTKEY%" - wallet_stakes = subtensor.get_stake_info_for_coldkey(coldkey_ss58=wallet.coldkey.ss58_address) - for stake in wallet_stakes: - result = subtensor.unstake_all( - wallet=wallet, - hotkey_ss58=stake.hotkey_ss58, - netuid=stake.netuid, - ) - print(result) # If you would like to unstake all stakes in all subnets unsafely, use rate_tolerance=None: - import bittensor as bt - subtensor = bt.Subtensor() - wallet = bt.Wallet("my_wallet") - netuid = 14 - hotkey = "5%SOME_HOTKEY_WHERE_IS_YOUR_STAKE_NOW%" - wallet_stakes = subtensor.get_stake_info_for_coldkey(coldkey_ss58=wallet.coldkey.ss58_address) - for stake in wallet_stakes: - result = subtensor.unstake_all( - wallet=wallet, - hotkey_ss58=stake.hotkey_ss58, - netuid=stake.netuid, - rate_tolerance=None, - ) - print(result) Notes: diff --git a/bittensor/utils/mock/subtensor_mock.py b/bittensor/utils/mock/subtensor_mock.py index 4fd1277975..95c6d4af54 100644 --- a/bittensor/utils/mock/subtensor_mock.py +++ b/bittensor/utils/mock/subtensor_mock.py @@ -919,14 +919,11 @@ def _neuron_subnet_exists( uid=uid, netuid=netuid, active=active, - rank=rank, emission=emission, incentive=incentive, consensus=consensus, - trust=trust, validator_trust=validator_trust, dividends=dividends, - pruning_score=pruning_score, last_update=last_update, validator_permit=validator_permit, stake=stake, diff --git a/contrib/CODE_REVIEW_DOCS.md b/contrib/CODE_REVIEW_DOCS.md index 9909606a89..2536fa12e2 100644 --- a/contrib/CODE_REVIEW_DOCS.md +++ b/contrib/CODE_REVIEW_DOCS.md @@ -1,25 +1,4 @@ # Code Review -### Conceptual Review - -A review can be a conceptual review, where the reviewer leaves a comment - * `Concept (N)ACK`, meaning "I do (not) agree with the general goal of this pull - request", - * `Approach (N)ACK`, meaning `Concept ACK`, but "I do 
(not) agree with the - approach of this change". - -A `NACK` needs to include a rationale why the change is not worthwhile. -NACKs without accompanying reasoning may be disregarded. -After conceptual agreement on the change, code review can be provided. A review -begins with `ACK BRANCH_COMMIT`, where `BRANCH_COMMIT` is the top of the PR -branch, followed by a description of how the reviewer did the review. The -following language is used within pull request comments: - - - "I have tested the code", involving change-specific manual testing in - addition to running the unit, functional, or fuzz tests, and in case it is - not obvious how the manual testing was done, it should be described; - - "I have not tested the code, but I have reviewed it and it looks - OK, I agree it can be merged"; - - A "nit" refers to a trivial, often non-blocking issue. ### Code Review Project maintainers reserve the right to weigh the opinions of peer reviewers @@ -40,33 +19,11 @@ a worthwhile change based on the judgement of the maintainers. ### Finding Reviewers -As most reviewers are themselves developers with their own projects, the review -process can be quite lengthy, and some amount of patience is required. If you find -that you've been waiting for a pull request to be given attention for several -months, there may be a number of reasons for this, some of which you can do something -about: +Add `Cortex` to the reviewers of your PR. If you are unable to add this group (as can happen with +forks), you can directly add the maintainers: [basfroman](https://github.com/basfroman), +[thewhaleking](https://github.com/thewhaleking), and [ibraheem-abe](https://github.com/ibraheem-abe). - - It may be because of a feature freeze due to an upcoming release. During this time, - only bug fixes are taken into consideration. If your pull request is a new feature, - it will not be prioritized until after the release. Wait for the release. 
- - It may be because the changes you are suggesting do not appeal to people. Rather than - nits and critique, which require effort and means they care enough to spend time on your - contribution, thundering silence is a good sign of widespread (mild) dislike of a given change - (because people don't assume *others* won't actually like the proposal). Don't take - that personally, though! Instead, take another critical look at what you are suggesting - and see if it: changes too much, is too broad, doesn't adhere to the - [developer notes](DEVELOPMENT_WORKFLOW.md), is dangerous or insecure, is messily written, etc. - Identify and address any of the issues you find. Then ask e.g. on IRC if someone could give - their opinion on the concept itself. - - It may be because your code is too complex for all but a few people, and those people - may not have realized your pull request even exists. A great way to find people who - are qualified and care about the code you are touching is the - [Git Blame feature](https://docs.github.com/en/github/managing-files-in-a-repository/managing-files-on-github/tracking-changes-in-a-file). Simply - look up who last modified the code you are changing and see if you can find - them and give them a nudge. Don't be incessant about the nudging, though. - - Finally, if all else fails, ask on IRC or elsewhere for someone to give your pull request - a look. If you think you've been waiting for an unreasonably long time (say, - more than a month) for no particular reason (a few lines changed, etc.), - this is totally fine. Try to return the favor when someone else is asking - for feedback on their code, and the universe balances out. - - Remember that the best thing you can do while waiting is give review to others! \ No newline at end of file +The maintainers generally try to commit at least a small portion of time each day to reviewing community PRs. +However, in some cases, new PRs can be missed. 
If your PR has been sitting in an unreviewed status for longer +than a few business days (Monday - Friday), feel free to tag us on the [Church of Rao](https://discord.gg/brRAeVCmzM) +[#btcli-btsdk](https://discord.com/channels/1120750674595024897/1242999357436071956) Discord channel. diff --git a/contrib/CONTRIBUTING.md b/contrib/CONTRIBUTING.md index e0a3c287b4..7cb2f8a32f 100644 --- a/contrib/CONTRIBUTING.md +++ b/contrib/CONTRIBUTING.md @@ -1,6 +1,7 @@ # Contributing to Bittensor -The following is a set of guidelines for contributing to Bittensor, which are hosted in the [Opentensor Organization](https://github.com/opentensor) on GitHub. These are mostly guidelines, not rules. Use your best judgment, and feel free to propose changes to this document in a pull request. +The following is a set of guidelines for contributing to Bittensor, which is hosted in the [Opentensor Organization](https://github.com/opentensor) on GitHub. +These are mostly guidelines, not rules. Use your best judgment, and feel free to propose changes to this document in a pull request. ## Table Of Contents 1. [I don't want to read this whole thing, I just have a question!!!](#i-dont-want-to-read-this-whole-thing-i-just-have-a-question) @@ -26,31 +27,49 @@ The following is a set of guidelines for contributing to Bittensor, which are ho > **Note:** Please don't file an issue to ask a question. You'll get faster results by using the resources below. We have an official Discord server where the community chimes in with helpful advice if you have questions. -This is the fastest way to get an answer and the core development team is active on Discord. +This is the fastest way to get an answer and the core development team is active on Discord. Also linked is the +more community-oriented Church of Rao Discord, which has channels that focus more on development than specific subnets +or generalities. 
* [Official Bittensor Discord](https://discord.gg/bittensor) +* [Church of Rao Discord](https://discord.gg/brRAeVCmzM) ## What should I know before I get started? -Bittensor is still in the Alpha stages, and as such you will likely run into some problems in deploying your model or installing Bittensor itself. If you run into an issue or end up resolving an issue yourself, feel free to create a pull request with a fix or with a fix to the documentation. The documentation repository can be found [here](https://github.com/latent-to/developer-docs). +Bittensor is constantly growing with new features, and as such you will likely run into some problems in deploying +your model or installing Bittensor itself. If you run into an issue or end up resolving an issue yourself, +feel free to create a pull request with a fix or with a fix to the documentation. The documentation repository +can be found [here](https://github.com/latent-to/developer-docs). -Additionally, note that the core implementation of Bittensor consists of two separate repositories: [The core Bittensor code](https://github.com/opentensor/bittensor) and the Bittensor Blockchain [subtensor](https://github.com/opentensor/subtensor). +Additionally, note that the core implementation of Bittensor consists of two separate repositories: +[The core Bittensor code](https://github.com/opentensor/bittensor) and the Bittensor Blockchain [subtensor](https://github.com/opentensor/subtensor). -Supplemental repository for the Bittensor subnet template can be found [here](https://github.com/opentensor/bittensor-subnet-template). This is a great first place to look for getting your hands dirty and start learning and building on Bittensor. See the [Tao.app](https://www.tao.app/explorer) explorer for a list of all the repositories for the active registered subnets. +Supplemental repository for the Bittensor subnet template can be found [here](https://github.com/opentensor/bittensor-subnet-template). 
+This is a great first place to look for getting your hands dirty and start learning and building on Bittensor. +See the [Tao.app](https://www.tao.app/explorer) explorer for a list of all the repositories for the active registered subnets. ## Getting Started New contributors are very welcome and needed. -Reviewing and testing is highly valued and the most effective way you can contribute as a new contributor. It also will teach you much more about the code and process than opening pull requests. +Reviewing and testing is highly valued and the most effective way you can contribute as a new contributor. +It also will teach you much more about the code and process than opening pull requests. -Before you start contributing, familiarize yourself with the Bittensor Core build system and tests. Refer to the documentation in the repository on how to build Bittensor core and how to run the unit tests, functional tests. +Before you start contributing, familiarize yourself with the Bittensor Core build system and tests. +Refer to the documentation in the repository on how to build Bittensor core and how to run the unit, integration, +and end-to-end (e2e) tests. -There are many open issues of varying difficulty waiting to be fixed. If you're looking for somewhere to start contributing, check out the [good first issue](https://github.com/opentensor/bittensor/labels/good%20first%20issue) list or changes that are up for grabs. Some of them might no longer be applicable. So if you are interested, but unsure, you might want to leave a comment on the issue first. Also peruse the [issues](https://github.com/opentensor/bittensor/issues) tab for all open issues. +There are frequently open issues of varying difficulty waiting to be fixed. +If you're looking for somewhere to start contributing, check out the [good first issue](https://github.com/opentensor/bittensor/labels/good%20first%20issue) +list or changes that are up for grabs. Some of them might no longer be applicable. 
+So if you are interested, but unsure, you might want to leave a comment on the issue first. +Also peruse the [issues](https://github.com/opentensor/bittensor/issues) tab for all open issues. ### Good First Issue Label The purpose of the good first issue label is to highlight which issues are suitable for a new contributor without a deep understanding of the codebase. However, good first issues can be solved by anyone. If they remain unsolved for a longer time, a frequent contributor might address them. -You do not need to request permission to start working on an issue. However, you are encouraged to leave a comment if you are planning to work on it. This will help other contributors monitor which issues are actively being addressed and is also an effective way to request assistance if and when you need it. +You do not need to request permission to start working on an issue. However, you are encouraged to leave a comment +if you are planning to work on it. This will help other contributors monitor which issues are actively being +addressed and is also an effective way to request assistance if and when you need it. ### Beginner and Help-wanted Issues Label You can start by looking through these `beginner` and `help-wanted` issues: @@ -73,7 +92,7 @@ You can contribute to Bittensor in one of two main ways (as well as many others) Here is a high-level summary: - Code consistency is crucial; adhere to established programming language conventions. -- Use `ruff format .` to format your Python code; it ensures readability and consistency. +- Use `make check` to format and check all linters in your Python code; it ensures readability and consistency. - Write concise Git commit messages; summarize changes in ~50 characters. - Follow these six commit rules: - Atomic Commits: Focus on one task or fix per commit. 
@@ -90,11 +109,16 @@ Here is a high-level summary: > Review the Bittensor [style guide](./STYLE.md) and [development workflow](./DEVELOPMENT_WORKFLOW.md) before contributing. -If you're looking to contribute to Bittensor but unsure where to start, please join our community [discord](https://discord.gg/bittensor), a developer-friendly Bittensor town square. You can also browse through the GitHub [issues](https://github.com/opentensor/bittensor/issues) to see where help might be needed. For a greater understanding of Bittensor's usage and development, check the [Bittensor Documentation](https://docs.learnbittensor.org). +If you're looking to contribute to Bittensor but unsure where to start, +please join our community [discord](https://discord.gg/bittensor), a developer-friendly Bittensor town square. +You can also browse through the GitHub [issues](https://github.com/opentensor/bittensor/issues) to see where help might be needed. +For a greater understanding of Bittensor's usage and development, check the [Bittensor Documentation](https://docs.learnbittensor.org). #### Pull Request Philosophy -Patchsets and enhancements should always be focused. A pull request could add a feature, fix a bug, or refactor code, but it should not contain a mixture of these. Please also avoid 'super' pull requests which attempt to do too much, are overly large, or overly complex as this makes review difficult. +Patchsets and enhancements should always be focused. A pull request could add a feature, fix a bug, or refactor code, +but it should not contain a mixture of these. Please also avoid 'super' pull requests which attempt to do too much, +are overly large, or overly complex as this makes review difficult. Specifically, pull requests must adhere to the following criteria: - **Must** branch off from `staging`. Make sure that all your PRs are using `staging` branch as a base or will be closed. 
@@ -103,7 +127,7 @@ Specifically, pull requests must adhere to the following criteria: - If a PR introduces a new feature, it *must* include corresponding tests. - Other PRs (bug fixes, refactoring, etc.) should ideally also have tests, as they provide proof of concept and prevent regression. - Categorize your PR properly by using GitHub labels. This aids in the review process by informing reviewers about the type of change at a glance. -- Make sure your code includes adequate comments. These should explain why certain decisions were made and how your changes work. +- Make sure your code includes adequate, but not unnecessary comments. These should explain why certain decisions were made and how your changes work. - If your changes are extensive, consider breaking your PR into smaller, related PRs. This makes your contributions easier to understand and review. - Be active in the discussion about your PR. Respond promptly to comments and questions to help reviewers understand your changes and speed up the acceptance process. @@ -126,16 +150,14 @@ Please follow these steps to have your contribution considered by the maintainer 3. Include relevant tests for any fixed bugs or new features as stated in the [testing guide](./TESTING.md). 4. Follow all instructions in [the template](../.github/pull_request_template.md) to create the PR. 5. Ensure your commit messages are clear and concise. Include the issue number if applicable. -6. If you have multiple commits, rebase them into a single commit using `git rebase -i`. -7. Explain what your changes do and why you think they should be merged in the PR description consistent with the [style guide](./STYLE.md). +6. Explain what your changes do and why you think they should be merged in the PR description consistent with the [style guide](./STYLE.md). *After* creating the PR: 1. Verify that all [status checks](https://help.github.com/articles/about-status-checks/) are passing after you submit your pull request. 2. 
Label your PR using GitHub's labeling feature. The labels help categorize the PR and streamline the review process. -3. Document your code with comments that provide a clear understanding of your changes. Explain any non-obvious parts of your code or design decisions you've made. -4. If your PR has extensive changes, consider splitting it into smaller, related PRs. This reduces the cognitive load on the reviewers and speeds up the review process. -Please be responsive and participate in the discussion on your PR! This aids in clarifying any confusion or concerns and leads to quicker resolution and merging of your PR. +Please be responsive and participate in the discussion on your PR! This aids in clarifying any confusion or concerns and +leads to quicker resolution and merging of your PR. > Note: If your changes are not ready for merge but you want feedback, create a draft pull request. @@ -146,7 +168,8 @@ When you are ready to submit your changes, create a pull request: > **Always** follow the [style guide](./STYLE.md) and [development workflow](./DEVELOPMENT_WORKFLOW.md) before submitting pull requests. -After you submit a pull request, it will be reviewed by the maintainers. They may ask you to make changes. Please respond to any comments and push your changes as a new commit. +After you submit a pull request, it will be reviewed by the maintainers. +They may ask you to make changes. Please respond to any comments and push your changes as a new commit. > Note: Be sure to merge the latest from "upstream" before making a pull request: @@ -158,7 +181,8 @@ git push origin ``` #### Testing -Before making a PR for any code changes, please write adequate testing with unittest and/or pytest if it is warranted. This is **mandatory** for new features and enhancements. See the [testing guide](./TESTING.md) for more complete information. +Before making a PR for any code changes, please write adequate testing with pytest if it is warranted. 
+This is **mandatory** for new features and enhancements. See the [testing guide](./TESTING.md) for more complete information. You may also like to view the [/tests](https://github.com/opentensor/bittensor/tree/master/tests) for starter examples. @@ -173,9 +197,12 @@ Remember, testing is crucial for maintaining code health, catching issues early, #### Addressing Feedback -After submitting your pull request, expect comments and reviews from other contributors. You can add more commits to your pull request by committing them locally and pushing to your fork. +After submitting your pull request, expect comments and reviews from other contributors. +You can add more commits to your pull request by committing them locally and pushing to your fork. -You are expected to reply to any review comments before your pull request is merged. You may update the code or reject the feedback if you do not agree with it, but you should express so in a reply. If there is outstanding feedback and you are not actively working on it, your pull request may be closed. +You are expected to reply to any review comments before your pull request is merged. +You may update the code or reject the feedback if you do not agree with it, but you should express so in a reply. +If there is outstanding feedback and you are not actively working on it, your pull request will be closed. #### Squashing Commits @@ -189,68 +216,92 @@ If your pull request contains fixup commits (commits that change the same line o # Save and quit. git push -f # (force push to GitHub) -Please update the resulting commit message, if needed. It should read as a coherent message. In most cases, this means not just listing the interim commits. +Please update the resulting commit message, if needed. It should read as a coherent message. In most cases, +this means not just listing the interim commits. -If your change contains a merge commit, the above workflow may not work and you will need to remove the merge commit first. 
See the next section for details on how to rebase. +If your change contains a merge commit, the above workflow may not work and you will need to remove the merge +commit first. See the next section for details on how to rebase. -Please refrain from creating several pull requests for the same change. Use the pull request that is already open (or was created earlier) to amend changes. This preserves the discussion and review that happened earlier for the respective change set. +Please refrain from creating several pull requests for the same change. Use the pull request that is already open +(or was created earlier) to amend changes. This preserves the discussion and review that happened earlier for the respective change set. The length of time required for peer review is unpredictable and will vary from pull request to pull request. #### Refactoring -Refactoring is a necessary part of any software project's evolution. The following guidelines cover refactoring pull requests for the Bittensor project. +Refactoring is a necessary part of any software project's evolution. +The following guidelines cover refactoring pull requests for the Bittensor project. -There are three categories of refactoring: code-only moves, code style fixes, and code refactoring. In general, refactoring pull requests should not mix these three kinds of activities in order to make refactoring pull requests easy to review and uncontroversial. In all cases, refactoring PRs must not change the behaviour of code within the pull request (bugs must be preserved as is). +There are three categories of refactoring: code-only moves, code style fixes, and code refactoring. In general, +refactoring pull requests should not mix these three kinds of activities in order to make refactoring pull requests +easy to review and uncontroversial. In all cases, refactoring PRs must not change the behaviour of code within the +pull request (bugs must be preserved as is). 
-Project maintainers aim for a quick turnaround on refactoring pull requests, so where possible keep them short, uncomplex and easy to verify. +Project maintainers aim for a quick turnaround on refactoring pull requests, so where possible keep them short, +uncomplex and easy to verify. -Pull requests that refactor the code should not be made by new contributors. It requires a certain level of experience to know where the code belongs to and to understand the full ramification (including rebase effort of open pull requests). Trivial pull requests or pull requests that refactor the code with no clear benefits may be immediately closed by the maintainers to reduce unnecessary workload on reviewing. +Pull requests that refactor the code should not be made by new contributors. +It requires a certain level of experience to know where the code belongs to and to understand the full +ramification (including rebase effort of open pull requests). Trivial pull requests or pull requests that +refactor the code with no clear benefits may be immediately closed by the maintainers to reduce +unnecessary workload on reviewing. #### Peer Review -Anyone may participate in peer review which is expressed by comments in the pull request. Typically reviewers will review the code for obvious errors, as well as test out the patch set and opine on the technical merits of the patch. Project maintainers take into account the peer review when determining if there is consensus to merge a pull request (remember that discussions may have taken place elsewhere, not just on GitHub). The following language is used within pull-request comments: +Anyone may participate in peer review which is expressed by comments in the pull request. +Typically, reviewers will review the code for obvious errors, as well as test out the patch set and +opine on the technical merits of the patch. 
Project maintainers take into account the peer review when +determining if there is consensus to merge a pull request (remember that discussions may have taken +place elsewhere, not just on GitHub). -- ACK means "I have tested the code and I agree it should be merged"; -- NACK means "I disagree this should be merged", and must be accompanied by sound technical justification. NACKs without accompanying reasoning may be disregarded; -- utACK means "I have not tested the code, but I have reviewed it and it looks OK, I agree it can be merged"; -- Concept ACK means "I agree in the general principle of this pull request"; -- Nit refers to trivial, often non-blocking issues. - -Reviewers should include the commit(s) they have reviewed in their comments. This can be done by copying the commit SHA1 hash. - -A pull request that changes consensus-critical code is considerably more involved than a pull request that adds a feature to the wallet, for example. Such patches must be reviewed and thoroughly tested by several reviewers who are knowledgeable about the changed subsystems. Where new features are proposed, it is helpful for reviewers to try out the patch set on a test network and indicate that they have done so in their review. Project maintainers will take this into consideration when merging changes. +A pull request that changes consensus-critical code is considerably more involved than a pull request that adds a +feature to the logger output, for example. Such patches must be reviewed and thoroughly tested by several +reviewers who are knowledgeable about the changed subsystems. Where new features are proposed, +it is helpful for reviewers to try out the patch set on a test network and indicate that they have done +so in their review. Project maintainers will take this into consideration when merging changes. For a more detailed description of the review process, see the [Code Review Guidelines](./CODE_REVIEW_DOCS.md). 
### Reporting Bugs -This section guides you through submitting a bug report for Bittensor. Following these guidelines helps maintainers and the community understand your report :pencil:, reproduce the behavior :computer: :computer:, and find related reports :mag_right:. +This section guides you through submitting a bug report for Bittensor. +Following these guidelines helps maintainers and the community understand your report, reproduce the behavior, and find +related reports. When you are creating a bug report, please [include as many details as possible](#how-do-i-submit-a-good-bug-report). -> **Note:** If you find a **Closed** issue that seems like it is the same thing that you're experiencing, open a new issue and include a link to the original issue in the body of your new one. +> **Note:** If you find a **Closed** issue that seems like it is the same thing that you're experiencing, +> open a new issue and include a link to the original issue in the body of your new one. #### Before Submitting A Bug Report -* **Check the [debugging guide](./DEBUGGING.md).** You might be able to find the cause of the problem and fix things yourself. Most importantly, check if you can reproduce the problem in the latest version of Bittensor by updating to the latest Master branch changes. +* **Check the [debugging guide](./DEBUGGING.md).** You might be able to find the cause of the problem and fix things yourself. +Most importantly, check if you can reproduce the problem in the latest version of Bittensor by updating to the latest Master branch changes. * **Check the [Discord Server](https://discord.gg/bittensor)** and ask in [#general](https://discord.com/channels/799672011265015819/799672011814862902). -* **Determine which repository the problem should be reported in**: if it has to do with your ML model, then it's likely [Bittensor](https://github.com/opentensor/bittensor). 
If you are having problems with your emissions or Blockchain, then it is in [subtensor](https://github.com/opentensor/subtensor). +* **Determine which repository the problem should be reported in**: if it has to do with incorrect client-side behavior, +then it's likely [Bittensor](https://github.com/opentensor/bittensor). +If you are having problems with your emissions or Blockchain, then it is in [subtensor](https://github.com/opentensor/subtensor). #### How Do I Submit A (Good) Bug Report? -Bugs are tracked as [GitHub issues](https://guides.github.com/features/issues/). You can find Bittensor's issues [here](https://github.com/opentensor/bittensor/issues). After you've determined which repository ([Bittensor](https://github.com/opentensor/bittensor) or [subtensor](https://github.com/opentensor/subtensor)) your bug is related to, create an issue on that repository. +Bugs are tracked as [GitHub issues](https://guides.github.com/features/issues/). You can find Bittensor's issues [here](https://github.com/opentensor/bittensor/issues). +After you've determined which repository ([Bittensor](https://github.com/opentensor/bittensor) or [subtensor](https://github.com/opentensor/subtensor)) your bug is related to, create an issue on that repository. Explain the problem and include additional details to help maintainers reproduce the problem: * **Use a clear and descriptive title** for the issue to identify the problem. -* **Describe the exact steps which reproduce the problem** in as many details as possible. For example, start by explaining how you started Bittensor, e.g. which command exactly you used in the terminal, or how you started Bittensor otherwise. When listing steps, **don't just say what you did, but explain how you did it**. For example, if you ran Bittensor with a set of custom configs, explain if you used a config file or command line arguments. -* **Provide specific examples to demonstrate the steps**. 
Include links to files or GitHub projects, or copy/pasteable snippets, which you use in those examples. If you're providing snippets in the issue, use [Markdown code blocks](https://docs.github.com/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks). +* **Describe the exact steps which reproduce the problem** in as many details as possible. +For example, start by explaining how you started Bittensor, e.g. which command exactly you used in the terminal, +or how you started Bittensor otherwise. When listing steps, **don't just say what you did, but explain how you did it**. For example, if you ran Bittensor with a set of custom configs, explain if you used a config file or command line arguments. +* **Provide specific examples to demonstrate the steps**. Include links to files or GitHub projects, or copy/pasteable snippets, +which you use in those examples. If you're providing snippets in the issue, use [Markdown code blocks](https://docs.github.com/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks). * **Describe the behavior you observed after following the steps** and point out what exactly is the problem with that behavior. * **Explain which behavior you expected to see instead and why.** -* **Include screenshots and animated GIFs** which show you following the described steps and clearly demonstrate the problem. You can use [Licecap](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [Silentcast](https://github.com/colinkeenan/silentcast) or [byzanz-record](https://manpages.ubuntu.com/manpages/questing/en/man1/byzanz-record.1.html) on Linux. -* **If you're reporting that Bittensor crashed**, include a crash report with a stack trace from the operating system. On macOS, the crash report will be available in `Console.app` under "Diagnostic and usage information" > "User diagnostic reports". 
Include the crash report in the issue in a [code block](https://docs.github.com/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks), a [file attachment](https://docs.github.com/articles/file-attachments-on-issues-and-pull-requests/), or put it in a [gist](https://gist.github.com/) and provide link to that gist. -* **If the problem is related to performance or memory**, include a CPU profile capture with your report, if you're using a GPU then include a GPU profile capture as well. Look into the [PyTorch Profiler](https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html) to look at memory usage of your model. +* **Include screenshots and animated GIFs** which show you following the described steps and clearly demonstrate the problem. +* **If you're reporting that Bittensor crashed**, include a crash report with a stack trace from the operating system. +On macOS, the crash report will be available in `Console.app` under "Diagnostic and usage information" > "User diagnostic reports". +Include the crash report in the issue in a [code block](https://docs.github.com/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks), a [file attachment](https://docs.github.com/articles/file-attachments-on-issues-and-pull-requests/), or put it in a [gist](https://gist.github.com/) and provide a link to that gist. +* **If the problem is related to performance or memory**, include a CPU profile capture with your report, if you're using a GPU then include a GPU profile capture as well. * **If the problem wasn't triggered by a specific action**, describe what you were doing before the problem happened and share more information using the guidelines below. Provide more context by answering these questions: @@ -266,18 +317,24 @@ Include details about your configuration and environment: * **What's the name and version of the OS you're using**? 
* **Are you running Bittensor in a virtual machine?** If so, which VM software are you using and which operating systems and versions are used for the host and the guest? * **Are you running Bittensor in a dockerized container?** If so, have you made sure that your docker container contains your latest changes and is up to date with Master branch? -* **Are you using [local configuration files](https://docs.learnbittensor.org/getting-started/install-btcli#configuration)** `config.yml` to customize your Bittensor experiment? If so, provide the contents of that config file, preferably in a [code block](https://docs.github.com/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks) or with a link to a [gist](https://gist.github.com/). +* **Are you using [local configuration files](https://docs.learnbittensor.org/getting-started/install-btcli#configuration)** `config.yml` to customize your Bittensor experiment? +If so, provide the contents of that config file, preferably in a [code block](https://docs.github.com/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks) or with a link to a [gist](https://gist.github.com/). ### Suggesting Enhancements and Features -This section guides you through submitting an enhancement suggestion for Bittensor, including completely new features and minor improvements to existing functionality. Following these guidelines helps maintainers and the community understand your suggestion :pencil: and find related suggestions :mag_right:. +This section guides you through submitting an enhancement suggestion for Bittensor, +including completely new features and minor improvements to existing functionality. Following these guidelines helps +maintainers and the community understand your suggestion and find related suggestions. 
When you are creating an enhancement suggestion, please [include as many details as possible](#how-do-i-submit-a-good-enhancement-suggestion). #### Before Submitting An Enhancement Suggestion -* **Check the [debugging guide](./DEBUGGING.md).** for tips — you might discover that the enhancement is already available. Most importantly, check if you're using the latest version of Bittensor by pulling the latest changes from the Master branch and if you can get the desired behavior by changing [Bittensor's config settings](https://docs.learnbittensor.org/python-api/html/autoapi/bittensor/core/config/). +* **Check the [debugging guide](./DEBUGGING.md)** for tips — you might discover that the enhancement is already available. +Most importantly, check if you're using the latest version of Bittensor by pulling the latest changes from the Master +branch and if you can get the desired behavior by changing [Bittensor's config settings](https://docs.learnbittensor.org/python-api/html/autoapi/bittensor/core/config/). +* **Determine which repository the problem should be reported in**: if it has to do with unexpected client-side behavior, then it's likely [Bittensor](https://github.com/opentensor/bittensor). +If you are having problems with your emissions or Blockchain, then it is in [subtensor](https://github.com/opentensor/subtensor). #### How To Submit A (Good) Feature Suggestion @@ -287,7 +344,7 @@ Enhancement suggestions are tracked as [GitHub issues](https://guides.github.com * **Provide a step-by-step description of the suggested enhancement** in as many details as possible. * **Provide specific examples to demonstrate the steps**. 
Include copy/pasteable snippets which you use in those examples, as [Markdown code blocks](https://docs.github.com/get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks). * **Describe the current behavior** and **explain which behavior you expected to see instead** and why. -* **Include screenshots and animated GIFs** which show you following the described steps and clearly demonstrate the problem. You can use [Licecap](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [Silentcast](https://github.com/colinkeenan/silentcast) or [byzanz-record](https://manpages.ubuntu.com/manpages/questing/en/man1/byzanz-record.1.html) on Linux. +* **Include screenshots and animated GIFs** which show you following the described steps and clearly demonstrate the problem. * **Explain why this enhancement would be useful** to most Bittensor users. * **List some other text editors or applications where this enhancement exists.** * **Specify which version of Bittensor are you using?** You can get the version of the Bittensor SDK by executing the `python3 -m bittensor` command. diff --git a/contrib/DEBUGGING.md b/contrib/DEBUGGING.md index 22013abcb8..13b9055376 100644 --- a/contrib/DEBUGGING.md +++ b/contrib/DEBUGGING.md @@ -40,14 +40,14 @@ at the top of your script or source file to enable more verbose output logs. You can also write your own in the code simply: ```python # Bittensor's wallet maintenance class. -wallet = bittensor.wallet() +wallet = bittensor.Wallet() bittensor.logging.debug( f"wallet keypair: {wallet.hotkey}" ) ... # Bittensor's chain state object. -metagraph = bittensor.metagraph(netuid=1) +metagraph = bittensor.Metagraph(netuid=1) bittensor.logging.trace( f"metagraph created! netuid {metagraph.netuid}" ) ``` @@ -55,14 +55,15 @@ bittensor.logging.trace( f"metagraph created! netuid {metagraph.netuid}" ) ## Querying the Network -Ensure you can query the Bittensor network using the Python API. 
If something is broken with your installation or the chain, this won't work out of the box. Here's an example of how to do this: +Ensure you can query the Bittensor network using the Python API. If something is broken with your installation or the chain, +this won't work out of the box. Here's an example of how to do this: ```python import bittensor bittensor.trace() # Attempt to query through the foundation endpoint. -print(bittensor.prompt("Heraclitus was a ")) +print(bittensor.Subtensor().block) ``` ## Debugging Miners @@ -70,19 +71,19 @@ print(bittensor.prompt("Heraclitus was a ")) First, try registering and running on a testnet: ```bash -btcli register --netuid --subtensor.chain_endpoint wss://test.finney.opentensor.ai:443 +btcli subnets register --netuid --network test ``` If that works, then try to register a miner on mainnet: ```bash -btcli register --netuid +btcli subnets register --netuid ``` See if you can observe your slot specified by UID: ```bash -btcli overview --netuid +btcli wallet overview --netuid ``` Here's an example of how to run a pre-configured miner: @@ -101,16 +102,16 @@ Try to use the Bittensor package to create a wallet, connect to the axon running import bittensor # Bittensor's wallet maintenance class. -wallet = bittensor.wallet() +wallet = bittensor.Wallet() # Bittensor's chain interface. -subtensor = bittensor.subtensor() +subtensor = bittensor.Subtensor() # Bittensor's chain state object. -metagraph = bittensor.metagraph(netuid=1) +metagraph = bittensor.Metagraph(netuid=1) # Instantiate a Bittensor endpoint. -axon = bittensor.axon(wallet=wallet, metagraph=metagraph) +axon = bittensor.Axon(wallet=wallet) # Start servicing messages on the wire. axon.start() @@ -119,10 +120,10 @@ axon.start() subtensor.serve_axon(netuid=1, axon=axon) # Connect to the axon running on slot 10, use the wallet to sign messages. 
-dendrite = bittensor.text_prompting(keypair=wallet.hotkey, axon=metagraph.axons[10]) +dendrite = bittensor.Dendrite(wallet=wallet) # Send a prompt to this endpoint -dendrite.forward(roles=['user'], messages=['Who is Rick James?']) +dendrite.forward(axon=metagraph.axons[10], roles=['user'], messages=['Who is Rick James?']) ``` > NOTE: It may be helpful to throw in breakpoints such as with `pdb`. diff --git a/contrib/DEVELOPMENT_WORKFLOW.md b/contrib/DEVELOPMENT_WORKFLOW.md index 91e781ffcc..8f9658d331 100644 --- a/contrib/DEVELOPMENT_WORKFLOW.md +++ b/contrib/DEVELOPMENT_WORKFLOW.md @@ -24,7 +24,8 @@ Bittensor's codebase consists of two main branches: **master** and **staging**. **master** -- This is Bittensor's live production branch, which should only be updated by the core development team. This branch is protected, so refrain from pushing or merging into it unless authorized. +- This is Bittensor's live production branch, which should only be updated by the core development team. +- This branch is protected, so refrain from pushing or merging into it unless authorized. **staging** - This branch is continuously updated and is where you propose and merge changes. It's essentially Bittensor's active development branch. @@ -35,7 +36,7 @@ Bittensor's codebase consists of two main branches: **master** and **staging**. - Branch off from: `staging` - Merge back into: `staging` -- Naming convention: `feature//` +- Naming convention: `feat//` Feature branches are used to develop new features for upcoming or future releases. They exist as long as the feature is in development, but will eventually be merged into `staging` or discarded. Always delete your feature branch after merging to avoid unnecessary clutter. @@ -47,6 +48,16 @@ Feature branches are used to develop new features for upcoming or future release Release branches support the preparation of a new production release, allowing for minor bug fixes and preparation of metadata (version number, configuration, etc). 
All new features should be merged into `staging` and wait for the next big release. +### Fix Branches + +- Branch off from: `staging` +- Merge back into: `staging` +- Naming convention: `fix//` + +Fix branches are used to make bug fixes which are not necessarily worthy of a hotfix, such as an edge case failure that +can wait to go out in the next regular release. Always delete your branch after merging to avoid unnecessary clutter. + + ### Hotfix Branches General workflow: @@ -62,7 +73,7 @@ Hotfix branches are meant for quick fixes in the production environment. When a #### Create a feature branch 1. Branch from the **staging** branch. - 1. Command: `git checkout -b feature/my-feature staging` + 1. Command: `git checkout -b feat/mygithubname/my-feature staging` > Rebase frequently with the updated staging branch so you do not face big conflicts before submitting your pull request. Remember, syncing your changes with other developers could also help you avoid big conflicts. @@ -71,9 +82,9 @@ Hotfix branches are meant for quick fixes in the production environment. When a In other words, integrate your changes into a branch that will be tested and prepared for release. 1. Switch branch to staging: `git checkout staging` -2. Merging feature branch into staging: `git merge --no-ff feature/my-feature` +2. Merging feature branch into staging: `git merge --no-ff feat/mygithubname/my-feature` 3. Pushing changes to staging: `git push origin staging` -4. Delete feature branch: `git branch -d feature/my-feature` (alternatively, this can be navigated on the GitHub web UI) +4. Delete feature branch: `git branch -d feat/mygithubname/my-feature` (alternatively, this can be navigated on the GitHub web UI) This operation is done by Github when merging a PR. @@ -83,7 +94,7 @@ So, what you have to keep in mind is: #### Creating a release branch -1. Create branch from staging: `git checkout -b release/3.4.0/descriptive-message/creator's_name staging` +1. 
Create branch from staging: `git checkout -b release/3.4.0/descriptive-message/creators_name staging` 2. Updating version with major or minor: `./scripts/update_version.sh major|minor` 3. Commit file changes with new version: `git commit -a -m "Updated version to 3.4.0"` @@ -108,9 +119,8 @@ This step may well lead to a merge conflict (probably even, since we have change #### Creating a hotfix branch 1. Create branch from master: `git checkout -b hotfix/3.3.4/descriptive-message/creator's-name master` -2. Update patch version: `./scripts/update_version.sh patch` -3. Commit file changes with new version: `git commit -a -m "Updated version to 3.3.4"` -4. Fix the bug and commit the fix: `git commit -m "Fixed critical production issue X"` +2. Commit file changes with new version: `git commit -a -m "Updated version to 3.3.4"` +3. Fix the bug and commit the fix: `git commit -m "Fixed critical production issue X"` #### Finishing a Hotfix Branch @@ -133,11 +143,12 @@ Finally, we remove the temporary branch: - `git branch -d hotfix/3.3.4/optional-descriptive-message` ## Continuous Integration (CI) and Continuous Deployment (CD) -Continuous Integration (CI) is a software development practice where members of a team integrate their work frequently. Each integration is verified by an automated build and test process to detect integration errors as quickly as possible. +Continuous Integration (CI) is a software development practice where members of a team integrate their work frequently. +Each integration is verified by an automated build and test process to detect integration errors as quickly as possible. -Continuous Deployment (CD) is a software engineering approach in which software functionalities are delivered frequently through automated deployments. +Continuous Deployment (CD) is a software engineering approach in which software functionalities are delivered frequently +through automated deployments. 
-- **CircleCI job**: Create jobs in CircleCI to automate the merging of staging into master and release version (needed to release code) and building and testing Bittensor (needed to merge PRs). ## Versioning and Release Notes diff --git a/contrib/RELEASE_GUIDELINES.md b/contrib/RELEASE_GUIDELINES.md index f40003bd68..94634442da 100644 --- a/contrib/RELEASE_GUIDELINES.md +++ b/contrib/RELEASE_GUIDELINES.md @@ -1,87 +1,19 @@ # Release Guidelines -The release manager in charge can release a Bittensor version using two scripts: - - [../scripts/release/versioning.sh](../scripts/release/versioning.sh) - - [../scripts/release/release.sh](../scripts/release/release.sh) - The release manager will need the right permissions for: - - github.com - - pypi.org - - hub.docker.com + - github.com (includes the PyPI credentials) If you are new in this role, ask for the proper setup you need to run this process manually. ## Process of release -1. Create a branch called `release/VERSION`, having VERSION with the version to release. -1. Make sure twine is installed: `pip install twine` -1. Within the release branch: - 1. Update the version executing:`./scripts/release/versioning.sh --update UPDATE_TYPE` - 1. **UPDATE_TYPE** could be *major*, *minor* or *patch*. - 1. Add release notes to CHANGELOG executing: `./scripts/release/add_notes_changelog.sh -A -V NEW_VERSION -P PREVIOUS_TAG -T GH_ACCESS_TOKEN` - 1. **NEW_VERSION**: e.g.: 3.6.4 - 1. **PREVIOUS_TAG**: e.g.: v3.6.3 - 1. **GH_ACCESS_TOKEN**: A github [personal access token](https://docs.github.com/en/enterprise-server@3.4/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) you need. - -1. Test the release branch and verify that it meets the requirements. -1. After merging the release branch; Run the release script - -## Versioning script usage - -Options: - - -U, --update: type of update. It could be major, minor, patch or rc (release candidate). 
- - -A, --apply: This specifies to apply the release. Without this the versioning will just show a dry run with no changes. - -## Release script usage - -Options: - - -A, --apply: This specifies to apply the release. Without this the release will just show a dry run with no changes. - - -T,--github-token: A github personal access token to interact with the Github API. - -### Github token - -Since you need to use a secret when releasing bittensor (github personal access token), I encourage you to use [pass](https://www.passwordstore.org/) or a similar tool that allows you to store the secret safely and not expose it in the history of the machine you use. - -So you can have: -``` -GITHUB_ACCESS_TOKEN=$(pass github/your_personal_token_with_permissions) -``` - -or -``` -GITHUB_ACCESS_TOKEN=$(whatever you need to get the token safely) -``` - -### Executions - -So, executing the script to release a minor version will be: - -``` -# For a dry run -./scripts/release/release.sh -``` - -``` -# Applying changes -./scripts/release/release.sh --apply --github-token $GITHUB_ACCESS_TOKEN` -``` - -## Checking release - -After the execution of the release script we would have generated: - - A new git tag in [github.com](https://github.com/opentensor/bittensor/tags) - - A new github release in [github.com](https://github.com/opentensor/bittensor/releases) - - A new pip package in [pypi.org](https://pypi.org/project/bittensor/#history) - - A new docker image in [hub.docker.com](https://hub.docker.com/r/opentensorfdn/bittensor/tags) - -## After release - -After a Bittensor release we have to -- Update [cubit](https://github.com/opentensor/cubit). - -### Updating cubit - -1. Updating the [Dockerfile](https://github.com/opentensor/cubit/blob/master/docker/Dockerfile) -1. Building its docker image (follow its README instructions) -1. Push it to hub.docker.com - 1. The generated name will be the same but with `-cubit` in its name +1. 
Begin to draft a new release in [Github](https://github.com/opentensor/bittensor/releases/new), using the appropriate version tag + 1. Note we follow [semver](https://semver.org/) +2. Create a new branch off of staging, named `changelog/` +3. After generating the release notes, copy these into the [CHANGELOG.md](../CHANGELOG.md) file, with the appropriate header +4. Bump the version in [pyproject.toml](../pyproject.toml) +5. Open a Pull Request against staging for this changelog +6. Once approved and merged into staging, delete the branch, and create a new branch off staging, named `release/` +7. Push this branch, and open a PR against master, which should include the changelog from step 3 +8. Once this passes tests, is approved, and merged to master, run [Build and Publish Python Package](https://github.com/opentensor/bittensor/actions/workflows/release.yml) with the new version +9. Verify the release is successful and pushed to [PyPI](https://pypi.org/project/bittensor/#history) diff --git a/contrib/STYLE.md b/contrib/STYLE.md index 13558ac9e4..9992bd404a 100644 --- a/contrib/STYLE.md +++ b/contrib/STYLE.md @@ -28,7 +28,7 @@ Python's official style guide is PEP 8, which provides conventions for writing c - `Indentation:` Use 4 spaces per indentation level. -- `Line Length:` Limit all lines to a maximum of 79 characters. +- `Line Length:` Limit all lines to a maximum of 79 characters. This is not strict, however, and we follow ruff's default of 88 characters. - `Blank Lines:` Surround top-level function and class definitions with two blank lines. Method definitions inside a class are surrounded by a single blank line. @@ -42,7 +42,7 @@ Python's official style guide is PEP 8, which provides conventions for writing c - Immediately inside parentheses, brackets or braces. - Immediately before a comma, semicolon, or colon. - Immediately before the open parenthesis that starts the argument list of a function call. 
-- `Comments:` Comments should be complete sentences and should be used to clarify code and are not a substitute for poorly written code. +- `Comments:` Comments should be used to clarify code and are not a substitute for poorly written code. #### For Python @@ -59,20 +59,23 @@ Python's official style guide is PEP 8, which provides conventions for writing c #### More details Use [`ruff` to format](https://docs.astral.sh/ruff/formatter/#the-ruff-formatter) your python code before committing for consistency across such a large pool of contributors. -Black code [style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#code-style) ensures consistent and opinionated code formatting. -Ruff automatically formats your Python code according to the Black style guide, enhancing code readability and maintainability. -Key Features of ruff & Black code style: +Ruff automatically formats your Python code according to the style guide, enhancing code readability and maintainability. + +Key Features of ruff code style: Consistency: ruff enforces a single, consistent coding style across your project, eliminating style debates and allowing developers to focus on code logic. - Readability: By applying a standard formatting style, Black improves code readability, making it easier to understand and collaborate on projects. + Readability: By applying a standard formatting style, ruff improves code readability, making it easier to understand and collaborate on projects. Automation: ruff automates the code formatting process, saving time and effort. It eliminates the need for manual formatting and reduces the likelihood of inconsistencies. +We have added a helper tool `make check` to run the ruff formatter, and all linters in the codebase. + ### Naming Conventions -- `Classes:` Class names should normally use the CapWords Convention. +- `Classes:` Class names should normally use the CamelCase Convention. 
+ - `Functions and Variables:` Function names should be lowercase, with words separated by underscores as necessary to improve readability. Variable names follow the same convention as function names. - `Constants:` Constants are usually defined on a module level and written in all capital letters with underscores separating words. @@ -159,7 +162,7 @@ Nothing more needs to be said; if the reader wonders what the typo was, she can If you’re committing something like this at the command line, it’s easy to use the -m option to git commit: - $ git commit -m"Fix typo in introduction to user guide" + $ git commit -m "Fix typo in introduction to user guide" However, when a commit merits a bit of explanation and context, you need to write a body. For example: @@ -317,7 +320,7 @@ The benefit of using `--fixup` and interactive rebase is that it keeps your comm #### Pull Request and Squashing Commits Caveats -While atomic commits are great for development and for understanding the changes within the branch, the commit history can get messy when merging to the main branch. To keep a cleaner and more understandable commit history in our main branch, we encourage squashing all the commits of a PR into one when merging. +While atomic commits are great for development and for understanding the changes within the branch, the commit history can get messy when merging to the master branch. To keep a cleaner and more understandable commit history in our main branch, we encourage squashing all the commits of a PR into one when merging. This single commit should provide an overview of the changes that the PR introduced. It should follow the guidelines for atomic commits (an atomic commit is complete, self-contained, and understandable) but on the scale of the entire feature, task, or fix that the PR addresses. This approach combines the benefits of atomic commits during development with a clean commit history in our main branch. 
diff --git a/contrib/TESTING.md b/contrib/TESTING.md index 59dc1d81a3..8e38a7bb17 100644 --- a/contrib/TESTING.md +++ b/contrib/TESTING.md @@ -34,7 +34,7 @@ import bittensor def test_some_functionality(): # Setup any necessary objects or state. - wallet = bittensor.wallet() + wallet = bittensor.Wallet() # Call the function you're testing. result = wallet.create_new_coldkey() @@ -50,24 +50,28 @@ In this example, we're testing the `create_new_coldkey` function of the `wallet` In some cases, you may need to mock certain functions or objects to isolate the functionality you're testing. Bittensor uses the `unittest.mock` library for this. Here's a simple example from the axon unittest: ```python -def test_axon_start(self): - mock_wallet = MagicMock( +import bittensor +import pytest + +def test_axon_start(mocker): + mock_wallet = mocker.Mock( spec=bittensor.Wallet, - coldkey=MagicMock(), - coldkeypub=MagicMock( + coldkey=mocker.Mock(spec=str), + coldkeypub=mocker.Mock( # mock ss58 address ss58_address="5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm" ), - hotkey=MagicMock( + hotkey=mocker.Mock( ss58_address="5CtstubuSoVLJGCXkiWRNKrrGg2DVBZ9qMs2qYTLsZR4q1Wg" ), ) - axon = bittensor.axon(wallet=mock_wallet, metagraph=None) + axon = bittensor.Axon(wallet=mock_wallet, config=bittensor.Config()) axon.start() assert axon.server._state.stage == grpc._server._ServerStage.STARTED ``` In this example, we're mocking the `coldkey`, `coldkeypub` and `hotkey` for a wallet. This allows us to test how the axon code behaves when `bittensor.Wallet()` would normally be called, without actually calling the constructor. + ## Test Coverage It's important to ensure that your tests cover as much of your code as possible. You can use the `pytest-cov` plugin to measure your test coverage. 
To use it, first install it with pip: @@ -86,9 +90,43 @@ This will output a coverage report showing the percentage of your code that's co Remember, while high test coverage is a good goal, it's also important to write meaningful tests. A test isn't very useful if it doesn't accurately represent the conditions under which your code will run. +## E2E Testing + +The Bittensor SDK includes end-to-end (E2E) tests that validate higher-level SDK workflows. All E2E tests live under `tests/e2e_tests/`. + +When you add or modify an E2E test module (for example, `tests/e2e_tests/test_some_logic.py`), you must run the affected tests locally before opening a pull request: + +```bash +pytest -q tests/e2e_tests/test_some_logic.py +``` + +Important: E2E tests are implemented in two variants within the same module: +- A synchronous test, e.g. `def test_some_logic(...):` using subtensor +- An asynchronous test, e.g. `@pytest.mark.asyncio` + `async def test_some_logic_async(...):` using async_subtensor + +If you change logic covered by an E2E module, ensure that both the sync and async variants pass. You can also run them explicitly: + +```bash +pytest -q tests/e2e_tests/test_some_logic.py -k test_some_logic # or test_some_logic_async +``` + +Async variants must use the `_async` suffix to keep test discovery and review consistent. + +To ensure that your changes are tested correctly and that all tests run against the expected tooling versions, we recommend installing the development dependencies provided by the project: + +```bash +python -m pip install bittensor[dev] +``` + +The dev extra includes all required testing and development dependencies pinned to compatible versions. Using this setup helps avoid issues caused by mismatched or missing test-related packages and ensures your local test environment is aligned with CI. 
+ +We strongly recommend performing all development and testing inside a pre-created Python virtual environment to avoid dependency conflicts with system or globally installed packages. + ## Continuous Integration -Bittensor uses CircleCI for continuous integration. This means that every time you push changes to the repository, all tests are automatically run. If any tests fail, you'll be notified so you can fix the issue before merging your changes. +Bittensor uses GH Actions for continuous integration. This means that every time you push changes to the repository, all tests are automatically run. If any tests fail, you'll be notified so you can fix the issue before merging your changes. + +Remember, tests are an important part of maintaining the health of a codebase. They help catch issues early and make it easier to add new features or refactor existing code. -Remember, tests are an important part of maintaining the health of a codebase. They help catch issues early and make it easier to add new features or refactor existing code. Happy testing! \ No newline at end of file +## Happy testing! 
\ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 173aa491d6..ee95f00e4b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "bittensor" -version = "10.0.0" +version = "10.0.1" description = "Bittensor SDK" readme = "README.md" authors = [ @@ -58,7 +58,6 @@ dev = [ "aioresponses==0.7.6", "factory-boy==3.3.0", "types-requests", - "torch>=1.13.1,<3.0" ] torch = [ "torch>=1.13.1,<3.0" diff --git a/scripts/create_wallet.sh b/scripts/create_wallet.sh deleted file mode 100755 index d0ee08b69f..0000000000 --- a/scripts/create_wallet.sh +++ /dev/null @@ -1,13 +0,0 @@ -mkdir -p ~/.bittensor/wallets/default/hotkeys -rm ~/.bittensor/wallets/default/coldkeypub.txt -rm ~/.bittensor/wallets/default/hotkeys/default -touch ~/.bittensor/wallets/default/coldkeypub.txt -touch ~/.bittensor/wallets/default/hotkeys/default -echo "0x74acaa8d7829336dfff7569f19225818cc593335b9aafcde3f69db23c3538561" >> ~/.bittensor/wallets/default/coldkeypub.txt -echo '{"accountId": "0x9cf7085aa3304c21dc0f571c0134abb12f2e8e1bc9dbfc82440b8d6ba7908655", "publicKey": "0x9cf7085aa3304c21dc0f571c0134abb12f2e8e1bc9dbfc82440b8d6ba7908655", "secretPhrase": "document usage siren cross across crater shrug jump marine distance absurd caught", "secretSeed": "0x2465ae0757117bea271ad622e1cd0c4b319c96896a3c7d9469a68e63cf7f9646", "ss58Address": "5FcWiCiFoSspGGocSxzatNL5kT6cjxjXQ9LuAuYbvFNUqcfX"}' >> ~/.bittensor/wallets/default/hotkeys/default -chmod 0600 ~/.bittensor/wallets/default/coldkeypub.txt -chmod 0600 ~/.bittensor/wallets/default/hotkeys/default -echo "~/.bittensor/wallets/default/coldkeypub.txt" -cat ~/.bittensor/wallets/default/coldkeypub.txt -echo "~/.bittensor/wallets/default/hotkeys/default" -cat ~/.bittensor/wallets/default/hotkeys/default \ No newline at end of file diff --git a/tests/e2e_tests/test_dendrite.py b/tests/e2e_tests/test_dendrite.py index 5ceb4aa6ee..fda19d35de 100644 --- 
a/tests/e2e_tests/test_dendrite.py +++ b/tests/e2e_tests/test_dendrite.py @@ -112,7 +112,6 @@ def test_dendrite(subtensor, templates, alice_wallet, bob_wallet): assert bob_neuron.active is True assert bob_neuron.validator_permit is False assert bob_neuron.validator_trust == 0.0 - assert bob_neuron.pruning_score == 0 with templates.validator(bob_wallet, alice_sn.netuid): time.sleep(5) # wait for 5 seconds for the Validator to process @@ -134,7 +133,6 @@ def test_dendrite(subtensor, templates, alice_wallet, bob_wallet): assert updated_neuron.validator_permit is True assert updated_neuron.hotkey == bob_wallet.hotkey.ss58_address assert updated_neuron.coldkey == bob_wallet.coldkey.ss58_address - assert updated_neuron.pruning_score != 0 @pytest.mark.asyncio @@ -225,7 +223,6 @@ async def test_dendrite_async(async_subtensor, templates, alice_wallet, bob_wall assert bob_neuron.active is True assert bob_neuron.validator_permit is False assert bob_neuron.validator_trust == 0.0 - assert bob_neuron.pruning_score == 0 async with templates.validator(bob_wallet, alice_sn.netuid): await asyncio.sleep(5) # wait for 5 seconds for the Validator to process @@ -248,4 +245,3 @@ async def test_dendrite_async(async_subtensor, templates, alice_wallet, bob_wall assert updated_neuron.validator_permit is True assert updated_neuron.hotkey == bob_wallet.hotkey.ss58_address assert updated_neuron.coldkey == bob_wallet.coldkey.ss58_address - assert updated_neuron.pruning_score != 0 diff --git a/tests/e2e_tests/test_incentive.py b/tests/e2e_tests/test_incentive.py index 37af7b0fc5..76f1f6cf41 100644 --- a/tests/e2e_tests/test_incentive.py +++ b/tests/e2e_tests/test_incentive.py @@ -58,14 +58,11 @@ def test_incentive(subtensor, templates, alice_wallet, bob_wallet): assert alice_neuron.validator_trust == 0 assert alice_neuron.incentive == 0 assert alice_neuron.consensus == 0 - assert alice_neuron.rank == 0 bob_neuron = subtensor.neurons.neurons(netuid=alice_sn.netuid)[1] assert bob_neuron.incentive 
== 0 assert bob_neuron.consensus == 0 - assert bob_neuron.rank == 0 - assert bob_neuron.trust == 0 # update weights_set_rate_limit for fast-blocks tempo = subtensor.subnets.tempo(alice_sn.netuid) @@ -123,14 +120,11 @@ def test_incentive(subtensor, templates, alice_wallet, bob_wallet): assert alice_neuron.validator_trust > 0.99 assert alice_neuron.incentive < 0.5 assert alice_neuron.consensus < 0.5 - assert alice_neuron.rank < 0.5 bob_neuron = neurons[1] assert bob_neuron.incentive > 0.5 assert bob_neuron.consensus > 0.5 - assert bob_neuron.rank > 0.5 - assert bob_neuron.trust == 1 bonds = subtensor.subnets.bonds(alice_sn.netuid) @@ -198,14 +192,11 @@ async def test_incentive_async(async_subtensor, templates, alice_wallet, bob_wal assert alice_neuron.validator_trust == 0 assert alice_neuron.incentive == 0 assert alice_neuron.consensus == 0 - assert alice_neuron.rank == 0 bob_neuron = (await async_subtensor.neurons.neurons(netuid=alice_sn.netuid))[1] assert bob_neuron.incentive == 0 assert bob_neuron.consensus == 0 - assert bob_neuron.rank == 0 - assert bob_neuron.trust == 0 # update weights_set_rate_limit for fast-blocks tempo = await async_subtensor.subnets.tempo(alice_sn.netuid) @@ -268,14 +259,11 @@ async def test_incentive_async(async_subtensor, templates, alice_wallet, bob_wal assert alice_neuron.validator_trust > 0.99 assert alice_neuron.incentive < 0.5 assert alice_neuron.consensus < 0.5 - assert alice_neuron.rank < 0.5 bob_neuron = neurons[1] assert bob_neuron.incentive > 0.5 assert bob_neuron.consensus > 0.5 - assert bob_neuron.rank > 0.5 - assert bob_neuron.trust == 1 bonds = await async_subtensor.subnets.bonds(alice_sn.netuid) diff --git a/tests/integration_tests/test_metagraph_integration.py b/tests/integration_tests/test_metagraph_integration.py index 005b1ae0c3..a84ead921c 100644 --- a/tests/integration_tests/test_metagraph_integration.py +++ b/tests/integration_tests/test_metagraph_integration.py @@ -84,8 +84,6 @@ def test_state_dict(self): assert "n" 
in state assert "block" in state assert "stake" in state - assert "ranks" in state - assert "trust" in state assert "consensus" in state assert "validator_trust" in state assert "incentive" in state @@ -105,11 +103,9 @@ def test_properties(self): metagraph.addresses metagraph.validator_trust metagraph.S - metagraph.R metagraph.I metagraph.E metagraph.C - metagraph.T metagraph.Tv metagraph.D metagraph.B diff --git a/tests/unit_tests/extrinsics/asyncex/test_mev_shield.py b/tests/unit_tests/extrinsics/asyncex/test_mev_shield.py index a9475af019..4143252636 100644 --- a/tests/unit_tests/extrinsics/asyncex/test_mev_shield.py +++ b/tests/unit_tests/extrinsics/asyncex/test_mev_shield.py @@ -15,7 +15,7 @@ async def test_wait_for_extrinsic_by_hash_success(subtensor, mocker): shield_id = "shield_id_123" submit_block_hash = "0xblockhash" starting_block = 100 - current_block = 101 + current_block = 100 mocked_get_block_number = mocker.patch.object( subtensor.substrate, @@ -76,7 +76,7 @@ async def test_wait_for_extrinsic_by_hash_decryption_failed(subtensor, mocker): shield_id = "shield_id_123" submit_block_hash = "0xblockhash" starting_block = 100 - current_block = 101 + current_block = 100 mocked_get_block_number = mocker.patch.object( subtensor.substrate, @@ -174,9 +174,9 @@ async def test_wait_for_extrinsic_by_hash_timeout(subtensor, mocker): # Asserts mocked_get_block_number.assert_awaited_once_with(submit_block_hash) - assert mocked_wait_for_block.await_count == 3 - assert mocked_get_block_hash.await_count == 3 - assert mocked_get_extrinsics.await_count == 3 + assert mocked_wait_for_block.await_count == 4 + assert mocked_get_block_hash.await_count == 4 + assert mocked_get_extrinsics.await_count == 4 assert result is None diff --git a/tests/unit_tests/extrinsics/asyncex/test_proxy.py b/tests/unit_tests/extrinsics/asyncex/test_proxy.py index d5287ec407..1afa67b4f8 100644 --- a/tests/unit_tests/extrinsics/asyncex/test_proxy.py +++ 
b/tests/unit_tests/extrinsics/asyncex/test_proxy.py @@ -259,6 +259,7 @@ async def test_kill_pure_proxy_extrinsic(subtensor, mocker): raise_error=False, wait_for_inclusion=True, wait_for_finalization=True, + wait_for_revealed_execution=True, ) assert response == mocked_proxy_extrinsic.return_value diff --git a/tests/unit_tests/extrinsics/test_mev_shield.py b/tests/unit_tests/extrinsics/test_mev_shield.py index 514f58e11a..44b7cec7a9 100644 --- a/tests/unit_tests/extrinsics/test_mev_shield.py +++ b/tests/unit_tests/extrinsics/test_mev_shield.py @@ -13,7 +13,7 @@ def test_wait_for_extrinsic_by_hash_success(subtensor, mocker): shield_id = "shield_id_123" submit_block_hash = "0xblockhash" starting_block = 100 - current_block = 101 + current_block = 100 mocked_get_block_number = mocker.patch.object( subtensor.substrate, "get_block_number", return_value=starting_block @@ -67,7 +67,7 @@ def test_wait_for_extrinsic_by_hash_decryption_failed(subtensor, mocker): shield_id = "shield_id_123" submit_block_hash = "0xblockhash" starting_block = 100 - current_block = 101 + current_block = 100 mocked_get_block_number = mocker.patch.object( subtensor.substrate, "get_block_number", return_value=starting_block @@ -152,9 +152,9 @@ def test_wait_for_extrinsic_by_hash_timeout(subtensor, mocker): # Asserts mocked_get_block_number.assert_called_once_with(submit_block_hash) - assert mocked_wait_for_block.call_count == 3 - assert mocked_get_block_hash.call_count == 3 - assert mocked_get_extrinsics.call_count == 3 + assert mocked_wait_for_block.call_count == 4 + assert mocked_get_block_hash.call_count == 4 + assert mocked_get_extrinsics.call_count == 4 assert result is None diff --git a/tests/unit_tests/extrinsics/test_proxy.py b/tests/unit_tests/extrinsics/test_proxy.py index b718d86d65..ed60d84dd6 100644 --- a/tests/unit_tests/extrinsics/test_proxy.py +++ b/tests/unit_tests/extrinsics/test_proxy.py @@ -244,6 +244,7 @@ def test_kill_pure_proxy_extrinsic(subtensor, mocker): raise_error=False, 
wait_for_inclusion=True, wait_for_finalization=True, + wait_for_revealed_execution=True, ) assert response == mocked_proxy_extrinsic.return_value diff --git a/tests/unit_tests/test_metagraph.py b/tests/unit_tests/test_metagraph.py index 280e885265..f42f6d2bdb 100644 --- a/tests/unit_tests/test_metagraph.py +++ b/tests/unit_tests/test_metagraph.py @@ -61,14 +61,6 @@ async def test_set_metagraph_attributes(mock_environment): is True ) - assert ( - np.array_equal( - metagraph.trust, - np.array([neuron.trust for neuron in neurons], dtype=np.float32), - ) - is True - ) - assert ( np.array_equal( metagraph.consensus, @@ -186,8 +178,6 @@ def test_deepcopy(mock_environment): assert np.array_equal(copied_metagraph.uids, metagraph.uids) assert np.array_equal(copied_metagraph.stake, metagraph.stake) assert np.array_equal(copied_metagraph.total_stake, metagraph.total_stake) - assert np.array_equal(copied_metagraph.ranks, metagraph.ranks) - assert np.array_equal(copied_metagraph.trust, metagraph.trust) assert np.array_equal(copied_metagraph.consensus, metagraph.consensus) assert np.array_equal(copied_metagraph.validator_trust, metagraph.validator_trust) assert np.array_equal(copied_metagraph.incentive, metagraph.incentive) @@ -240,8 +230,6 @@ def test_copy(mock_environment): assert np.array_equal(copied_metagraph.uids, metagraph.uids) assert np.array_equal(copied_metagraph.stake, metagraph.stake) assert np.array_equal(copied_metagraph.total_stake, metagraph.total_stake) - assert np.array_equal(copied_metagraph.ranks, metagraph.ranks) - assert np.array_equal(copied_metagraph.trust, metagraph.trust) assert np.array_equal(copied_metagraph.consensus, metagraph.consensus) assert np.array_equal(copied_metagraph.validator_trust, metagraph.validator_trust) assert np.array_equal(copied_metagraph.incentive, metagraph.incentive)