diff --git a/docker-compose.yml b/docker-compose.yml index 4086f43..36d0076 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,9 +1,9 @@ version: '2' services: ipfs-daemon: - image: 'ipfs/kubo:v0.18.1' + image: 'ipfs/kubo:v0.39.0' ports: - "4001:4001" - "5001:5001" user: "ipfs" - command: [ "daemon", "--enable-pubsub-experiment" ] + command: [ "daemon", "--enable-pubsub-experiment", "--enable-namesys-pubsub", "--routing=dhtclient" ] diff --git a/install-run-ipfs.sh b/install-run-ipfs.sh index a4bf3c8..f40377d 100755 --- a/install-run-ipfs.sh +++ b/install-run-ipfs.sh @@ -1,6 +1,6 @@ #! /bin/sh -wget https://dist.ipfs.io/kubo/v0.18.1/kubo_v0.18.1_linux-amd64.tar.gz -O /tmp/kubo_linux-amd64.tar.gz +wget https://dist.ipfs.io/kubo/v0.39.0/kubo_v0.39.0_linux-amd64.tar.gz -O /tmp/kubo_linux-amd64.tar.gz tar -xvf /tmp/kubo_linux-amd64.tar.gz export PATH=$PATH:$PWD/kubo/ -ipfs init -ipfs daemon --enable-pubsub-experiment --routing=dhtclient & +ipfs init --profile server +ipfs daemon --enable-pubsub-experiment --enable-namesys-pubsub --routing=dhtclient & diff --git a/mac-install-run-ipfs.sh b/mac-install-run-ipfs.sh index dd46049..7c8538e 100755 --- a/mac-install-run-ipfs.sh +++ b/mac-install-run-ipfs.sh @@ -1,6 +1,6 @@ #! 
/bin/sh -wget https://dist.ipfs.io/kubo/v0.18.1/kubo_v0.18.1_darwin-arm64.tar.gz -O /tmp/kubo_darwin-arm64.tar.gz +wget https://dist.ipfs.io/kubo/v0.39.0/kubo_v0.39.0_darwin-arm64.tar.gz -O /tmp/kubo_darwin-arm64.tar.gz tar -xvf /tmp/kubo_darwin-arm64.tar.gz export PATH=$PATH:$PWD/kubo/ -ipfs init -ipfs daemon --enable-pubsub-experiment --routing=dhtclient & +ipfs init --profile server +ipfs daemon --enable-pubsub-experiment --enable-namesys-pubsub --routing=dhtclient & diff --git a/src/main/java/io/ipfs/api/IPFS.java b/src/main/java/io/ipfs/api/IPFS.java index 60b1067..1cbdbea 100644 --- a/src/main/java/io/ipfs/api/IPFS.java +++ b/src/main/java/io/ipfs/api/IPFS.java @@ -832,7 +832,7 @@ public List add() throws IOException { } public List list() throws IOException { - return ((List)retrieveMap("bootstrap/list").get("Peers")) + return ((List)retrieveMap("bootstrap/list?expand-auto=true").get("Peers")) .stream().map(x -> new MultiAddress(x)).collect(Collectors.toList()); } diff --git a/src/main/java/io/ipfs/api/Peer.java b/src/main/java/io/ipfs/api/Peer.java index 551011c..9dd42b7 100644 --- a/src/main/java/io/ipfs/api/Peer.java +++ b/src/main/java/io/ipfs/api/Peer.java @@ -2,6 +2,7 @@ import io.ipfs.cid.*; import io.ipfs.multiaddr.*; +import io.ipfs.multibase.Base58; import io.ipfs.multihash.*; import java.util.*; @@ -9,12 +10,12 @@ public class Peer { public final MultiAddress address; - public final Multihash id; + public final Cid id; public final long latency; public final String muxer; public final Object streams; - public Peer(MultiAddress address, Multihash id, long latency, String muxer, Object streams) { + public Peer(MultiAddress address, Cid id, long latency, String muxer, Object streams) { this.address = address; this.id = id; this.latency = latency; @@ -27,10 +28,20 @@ public static Peer fromJSON(Object json) { throw new IllegalStateException("Incorrect json for Peer: " + JSONParser.toString(json)); Map m = (Map) json; Function val = key -> (String) 
m.get(key); - long latency = val.apply("Latency").length() > 0 ? Long.parseLong(val.apply("Latency")) : -1; - return new Peer(new MultiAddress(val.apply("Addr")), Cid.decode(val.apply("Peer")), latency, val.apply("Muxer"), val.apply("Streams")); + Cid peer = decodePeerId(val.apply("Peer")); + long latency = m.containsKey("Latency") && val.apply("Latency") != null && !val.apply("Latency").isEmpty() ? Long.parseLong(val.apply("Latency")) : -1; + return new Peer(new MultiAddress(val.apply("Addr")), peer, latency, val.apply("Muxer"), val.apply("Streams")); } + // See https://github.com/Peergos/Peergos/blob/81064fdb2cdf6b6fe126cf6a20d4d40ecd148938/src/peergos/shared/io/ipfs/Cid.java#L148 + public static Cid decodePeerId(String peerId) { + if (peerId.startsWith("1")) { + // convert base58 encoded identity multihash to cidV1 + Multihash hash = Multihash.deserialize(Base58.decode(peerId)); + return new Cid(1, Cid.Codec.Libp2pKey, hash.getType(), hash.getHash()); + } + return Cid.decode(peerId); + } @Override public String toString() { return id + "@" + address; diff --git a/src/test/java/io/ipfs/api/APITest.java b/src/test/java/io/ipfs/api/APITest.java index 2b291f3..4ea3b16 100644 --- a/src/test/java/io/ipfs/api/APITest.java +++ b/src/test/java/io/ipfs/api/APITest.java @@ -7,6 +7,7 @@ import org.junit.*; import java.io.*; +import java.nio.charset.StandardCharsets; import java.nio.file.*; import java.util.*; import java.util.function.*; @@ -396,31 +397,40 @@ public void rawLeafNodePinUpdate() throws IOException { @Test public void indirectPinTest() throws IOException { - Multihash EMPTY = ipfs.object._new(Optional.empty()).hash; - io.ipfs.api.MerkleNode data = ipfs.object.patch(EMPTY, "set-data", Optional.of("childdata".getBytes()), Optional.empty(), Optional.empty()); - Multihash child = data.hash; + String path = "/test/indirectPinTest-" + UUID.randomUUID(); + ipfs.files.write(path + "/content", new NamedStreamable.ByteArrayWrapper("something".getBytes(StandardCharsets.UTF_8)), true, true); + Multihash content =
Multihash.decode((String) ipfs.files.stat(path + "/content").get("Hash")); - io.ipfs.api.MerkleNode tmp1 = ipfs.object.patch(EMPTY, "set-data", Optional.of("parent1_data".getBytes()), Optional.empty(), Optional.empty()); - Multihash parent1 = ipfs.object.patch(tmp1.hash, "add-link", Optional.empty(), Optional.of(child.toString()), Optional.of(child)).hash; - ipfs.pin.add(parent1); + // adding one more extra entry to parent1 to keep its hash different from parent2 + ipfs.files.mkdir(path + "/parent1", true); + ipfs.files.write(path + "/parent1/content1", new NamedStreamable.ByteArrayWrapper("somethingelse".getBytes(StandardCharsets.UTF_8)), true, true); + ipfs.files.cp("/ipfs/" + content, path + "/parent1/content2", true); + + ipfs.files.mkdir(path + "/parent2", true); + ipfs.files.cp("/ipfs/" + content, path + "/parent2/content", true); - io.ipfs.api.MerkleNode tmp2 = ipfs.object.patch(EMPTY, "set-data", Optional.of("parent2_data".getBytes()), Optional.empty(), Optional.empty()); - Multihash parent2 = ipfs.object.patch(tmp2.hash, "add-link", Optional.empty(), Optional.of(child.toString()), Optional.of(child)).hash; + Multihash parent1 = Multihash.decode((String) ipfs.files.stat(path + "/parent1").get("Hash")); + Multihash parent2 = Multihash.decode((String) ipfs.files.stat(path + "/parent2").get("Hash")); + + ipfs.pin.add(parent1); ipfs.pin.add(parent2); ipfs.pin.rm(parent1, true); Map ls = ipfs.pin.ls(IPFS.PinType.all); - boolean childPresent = ls.containsKey(child); + boolean childPresent = ls.containsKey(content); if (!childPresent) - throw new IllegalStateException("Child not present!"); + throw new IllegalStateException("Child not present: " + ls); ipfs.repo.gc(); Map ls2 = ipfs.pin.ls(IPFS.PinType.all); - boolean childPresentAfterGC = ls2.containsKey(child); + boolean childPresentAfterGC = ls2.containsKey(content); if (!childPresentAfterGC) - throw new IllegalStateException("Child not present!"); -} + throw new IllegalStateException("Child not present:" + 
ls2); + ipfs.files.rm(path, true, true); + } + + @Ignore("RPC API removed") @Test public void objectPatch() throws IOException { MerkleNode obj = ipfs.object._new(Optional.empty()); @@ -462,6 +472,7 @@ public void refsTest() throws IOException { } } + @Ignore("RPC API removed") @Test public void objectTest() throws IOException { MerkleNode _new = ipfs.object._new(Optional.empty()); @@ -489,10 +500,8 @@ public void bulkBlockTest() throws IOException { List bulkPut = ipfs.block.put(Arrays.asList(raw, raw, raw, raw, raw), Optional.of("cbor")); List hashes = bulkPut.stream().map(m -> m.hash).collect(Collectors.toList()); byte[] result = ipfs.block.get(hashes.get(0)); - System.out.println(); } -// @Ignore // Ignored because ipfs frequently times out internally in the publish call @Test public void publish() throws Exception { // JSON document @@ -514,6 +523,15 @@ public void publish() throws Exception { assertEquals("Should be equals", resolved, "/ipfs/" + merkleNode.hash); } + @Test + public void resolveName() throws Exception { + // Resolve from DNSLinked domain + String resolved = ipfs.name.resolve("docs.ipfs.tech"); + assertNotNull(resolved); + assertTrue(resolved.startsWith("/ipfs/")); + assertTrue(resolved.length() > 20); // this may change (content and encoding as well) + } + @Test public void pubsubSynchronous() { String topic = "topic" + System.nanoTime(); @@ -587,7 +605,6 @@ public void merkleLinkInMap() throws IOException { // These commands can be used to reproduce this on the command line String reproCommand1 = "printf \"" + toEscapedHex(rawTarget) + "\" | ipfs block put --format=cbor"; String reproCommand2 = "printf \"" + toEscapedHex(rawSource) + "\" | ipfs block put --format=cbor"; - System.out.println(); } @Test @@ -652,7 +669,6 @@ public void rootMerkleLink() throws IOException { // These commands can be used to reproduce this on the command line String reproCommand1 = "printf \"" + toEscapedHex(rawTarget) + "\" | ipfs block put --format=cbor"; String
reproCommand2 = "printf \"" + toEscapedHex(obj2) + "\" | ipfs block put --format=cbor"; - System.out.println(); } /** @@ -672,7 +688,6 @@ public void rootNull() throws IOException { // These commands can be used to reproduce this on the command line String reproCommand1 = "printf \"" + toEscapedHex(obj) + "\" | ipfs block put --format=cbor"; - System.out.println(); } /** @@ -754,7 +769,6 @@ public void dhtTest() throws IOException { @Test public void localId() throws Exception { Map id = ipfs.id(); - System.out.println(); } @Test @@ -815,13 +829,15 @@ public void versionTest() throws IOException { @Test public void swarmTestFilters() throws IOException { + // on GH CI we run this in "server" profile that packs a TON of filters + // See https://github.com/ipfs/kubo/blob/c1fd4d70f58e682bfe73fa4b50d17581c823c671/config/profile.go#L27 Map listenAddrs = ipfs.swarm.listenAddrs(); Map localAddrs = ipfs.swarm.localAddrs(true); String multiAddrFilter = "/ip4/192.168.0.0/ipcidr/16"; Map rm = ipfs.swarm.rmFilter(multiAddrFilter); Map filters = ipfs.swarm.filters(); List filtersList = (List)filters.get("Strings"); - Assert.assertNull("Filters empty", filtersList); + Assert.assertTrue("Filters empty", filtersList == null || !filtersList.contains(multiAddrFilter)); Map added = ipfs.swarm.addFilter(multiAddrFilter); filters = ipfs.swarm.filters(); @@ -856,6 +872,8 @@ public void bitswapTest() throws IOException { Map stat = ipfs.bitswap.stat(); Map stat2 = ipfs.bitswap.stat(true); } + + @Ignore("AutoConf.Enabled=true is default; prevents bootstrap removal") @Test public void bootstrapTest() throws IOException { List bootstrap = ipfs.bootstrap.list();