-
Notifications
You must be signed in to change notification settings - Fork 16
/
index.html
5098 lines (5032 loc) · 177 KB
/
index.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>DID Method Rubric v1.0</title>
<script src="https://www.w3.org/Tools/respec/respec-w3c" defer="defer" class="remove"></script>
<script class='remove' src="./common.js"></script>
<script class='remove'>
// See https://github.com/w3c/respec/wiki/ for how to configure ReSpec
// ReSpec configuration for this document.
// See https://github.com/w3c/respec/wiki/ for the full list of options.
var respecConfig = {
  // the W3C WG and public mailing list
  group: "did",
  wgPublicList: "public-did-wg",
  // the specification's short name, as in http://www.w3.org/TR/short-name/
  shortName: "did-rubric",
  // specification status (e.g., WD, LCWD, NOTE, etc.). If in doubt use ED.
  specStatus: "FPWD-NOTE",
  // Editor's Draft URL
  edDraftURI: "https://w3c.github.io/did-rubric/",
  // subtitle for the spec
  // subtitle: "A reproducible method for evaluating DID Methods",
  // if you wish the publication date to be other than today, set this
  //publishDate: "2021-08-26",
  //previousMaturity: "ED",
  // automatically allow term pluralization
  pluralize: true,
  // extend the bibliography entries (ccg is supplied by ./common.js)
  localBiblio: ccg.localBiblio,
  github: {
    repoURL: "https://github.com/w3c/did-rubric/",
    branch: "main"
  },
  // FIX: key was misspelled "includePermaLInks" (capital I); ReSpec ignores
  // unknown keys, so the option silently had no effect.
  includePermaLinks: false,
  // FIX: was the string "true"; ReSpec documents this option as a boolean.
  noRecTrack: true,
  maxTocLevel: 3,
  // list of specification editors
  editors: [{
    name: "Joe Andrieu",
    company: "Invited Expert",
    url: "http://legreq.com",
    w3cid: 97261
  }, {
    name: "Ryan Grant",
    url: "https://contract.design/",
    company: "Digital Contract Design",
    w3cid: 129181
  }, {
    name: "Daniel Hardman",
    url: "https://www.linkedin.com/in/danielhardman/",
    company: "Invited Expert",
    w3cid: 108790
  }],
  // list of specification authors
  authors: [{
    name: "Joe Andrieu",
    company: "Invited Expert",
    url: "http://legreq.com",
    w3cid: 97261
  },
  {
    name: "Daniel Hardman",
    url: "https://www.linkedin.com/in/danielhardman/",
    company: "Invited Expert",
    w3cid: 108790
  },
  {
    name: "Shannon Appelcline"
  },
  {
    name: "Amy Guy"
  },
  {
    name: "Joachim Lohkamp"
  },
  {
    name: "Drummond Reed",
    url: "https://www.linkedin.com/in/drummondreed/",
    company: "Evernym",
    companyURL: "https://www.evernym.com/",
    w3cid: 3096
  },
  {
    name: "Markus Sabadello",
    url: "https://www.linkedin.com/in/markus-sabadello-353a0821",
    company: "Danube Tech",
    companyURL: "https://danubetech.com/",
    w3cid: 46729
  },
  {
    name: "Oliver Terbu"
  }
  ]
};
</script>
</head>
<body>
<section id="abstract">
<p>
The communities behind Decentralized Identifiers (DIDs) bring together a
diverse group of contributors who have decidedly different notions of exactly
what "decentralization" means.
</p>
<p>
Rather than attempting to resolve this potentially unresolvable question, we
propose a rubric — a scoring guide used to evaluate performance, a
product, or a project — that teaches how to evaluate a given DID Method
according to one's own requirements.
</p>
<p>
This rubric presents a set of criteria which an <a>Evaluator</a> can apply to
any DID Method based on the use cases most relevant to them. We avoid reducing
the Evaluation to a single number because the criteria tend to be multidimensional
and many of the possible responses are not necessarily good or bad. It is up
to the <a>Evaluator</a> to understand how each response in each criteria might
illuminate favorable or unfavorable consequences for their needs.
</p>
<p>
This rubric is a collection of criteria for creating Evaluation
Reports that assist people in choosing a DID Method.
While this rubric assists in the evaluation of many aspects of a
DID Method, it is not exhaustive.
</p>
</section>
<section id="sotd">
</section>
<section class="informative">
<h2>Introduction</h2>
<section>
<h3>Background</h3>
<p>
A rubric is a tool used in academia to communicate expectations and evaluate
performance. It consists of a set of criteria to be evaluated, possible
responses for each criteria, and a scoring guide explaining both how to
choose and interpret each response. The act of evaluating a rubric, which we
call an Evaluation, provides a basis for self-evaluation, procurement decisions,
or even marketing points. Written records of
an evaluation, which we'll call an <a>Evaluation Report</a>, document how a
particular subject is measured against the criteria. For students, a rubric
helps to clarify how their work will be evaluated by others. For
<a>Evaluator</a>s, a rubric provides a consistent framework for investigating
and documenting the performance of a subject against a particular set of
criteria.
</p>
<p>
We were inspired to develop a rubric for
decentralization when discussions about the requirements for decentralized
identifiers, aka DIDs, led to intractable disagreement. It became clear that
no single definition of "decentralized" would suffice for all of the
motivations that inspired developers of DID Methods to work on a new
decentralized approach to identity. Despite this challenge, two facts remained
clear:
</p>
<ol>
<li>
the people invested in this work shared a common goal of reversing the
problems with centralized identity systems and
</li>
<li>
they also had numerous, distinct reasons for doing so.
</li>
</ol>
<p>
Rather than attempt to force a definition of "decentralized" that might work
for many but would alienate others, the group set out to capture the
measurable factors that could enable <a>Evaluator</a>s to judge the
decentralization of DID Methods based on their own independent requirements,
with minimal embedded bias.
</p>
</section> <!-- background -->
<section>
<h3>How to apply this rubric</h3>
<p>
Pick the most important criteria for your use, ask each question, and select the
most appropriate response. Do this for all of the DID
Methods under consideration.
</p>
<p>
Each <a>Evaluation</a> should start with an explicit framing of the use under
consideration. Are you evaluating the Method for use in Internet-of-Things
(IoT)? For school children's extra-curricular activities? For international
travel? The use, or set of uses, will directly affect how some of the
questions are answered.
</p>
<p>
Where a given Method offers variability, such as multiple networks for the
same Method, then evaluate each variant. For example, did:ethr supports
Ethereum mainnet, multiple testnets and permissioned EVM-compliant networks
such as Quorum. To apply a criteria to did:ethr, you will evaluate it against
all the variations that matter <em>to you</em>. Each variation should get its
own <a>Evaluation</a>. This applies to Level 2 Networks that can operate on
multiple Level 1 Networks as well as DID Methods that directly offer support
for multiple underlying DID registries.
</p>
<p>
When creating an <a>Evaluation Report</a>, we recommend noting both the
<a>Evaluator</a> and the date of the <a>Evaluation</a>. Many of the criteria
are subjective and all of them may evolve. Tracking who
made the <a>Evaluation</a> and when they made it will help readers better
understand any biases or timeliness issues that may affect the applicability
of the <a>Evaluation</a>.
</p>
<p>
Be selective and acknowledge the subjective. <a>Evaluations</a> do not need to
be exhaustive. There is no requirement to answer all the questions. Simply
answer the ones most relevant to the use contemplated. Similarly, understand
that any recorded <a>Evaluation</a> is going to represent the biases of the
<a>Evaluator</a> in the context of their target use. Even the same
<a>Evaluator</a>, evaluating the same Method for a different use, may come up
with slightly different answers—for example, that which is economically
accessible for small businesses might not be cost-effective for refugees, and
that could affect how well-suited a given Method is for a specific use.
</p>
<p>
Finally, note that this particular rubric is about decentralization. It
doesn't cover all of the other criteria that might be relevant to evaluating a
given DID Method. There are security, privacy, and economic concerns that
should be considered. We look forward to working with the community to develop
additional rubrics for these other areas and encourage <a>Evaluator</a>s to
use this rubric as a starting point for their own work rather than the final
say in the merit of any given Method.
</p>
<p>
In short, use this rubric to help understand if a given DID Method is
decentralized enough for your needs.
</p>
</section> <!-- how to apply -->
<section>
<h3>Evaluation reports</h3>
<p>
To record and report an <a>Evaluation</a>, we recommend two possible formats,
either comprehensive or comparative.
</p>
<p>
A comprehensive <a>Evaluation</a> applies a single set of criteria to just one
Method. This set is chosen by the <a>Evaluator</a>; it need not be all
possible criteria, but it is all relevant criteria as judged by the
<a>Evaluator</a>.
</p>
<p>
A comparative <a>Evaluation</a> includes multiple Methods in the same table to
easily compare and contrast two or more different Methods. This may include
any number of criteria. These are the type of reports we use as examples
throughout the criteria list.
</p>
<p>
In addition to the selected criteria, we recommend each report specify
</p>
<ol>
<li>
The Method(s) being evaluated
</li>
<li>
A link to the Method specification
</li>
<li>
The <a>Evaluator</a>(s)
</li>
<li>
The date of the <a>Evaluation</a>
</li>
<li>
A description of the use case(s) for which the Method is being evaluated
</li>
<li>
The rubric used for the <a>Evaluation</a>, along with reference to the
specific version.
</li>
<li>
Optionally, a URL for retrieving the report.
</li>
</ol>
</section> <!-- evaluation -->
<section>
<h3>Categories of criteria</h3>
<p>
We have grouped our criteria into several categories:
</p>
<ol>
<li>
Rulemaking
</li>
<li>
Design
</li>
<li>
Operations
</li>
<li>
Enforcement
</li>
<li>
Alternatives
</li>
<li>
Adoption & Diversity
</li>
<li>
Security
</li>
<li>
Privacy</li>
</ol>
<p>
<a>Evaluator</a>s should consider criteria from all groups, as best fits
your use cases.
</p>
<p>
Three categories cover how a given Method is governed:
Rulemaking, Operations, and Enforcement.
Our approach parallels the same separation of
authority embodied in the United States Constitution.
</p>
<p>
<strong>Rulemaking</strong> addresses who makes the rules and how. (This is
the legislative branch in the US.)
</p>
<p>
<strong>Operations</strong> addresses how those rules are executed and how
everyone knows that they are carried out. (This is the executive branch in the
US.)
</p>
<p>
<strong>Enforcement</strong> addresses how we find and respond to rule
breaking. (This is the judicial branch in the US.)
</p>
<p>
This mental model is key to understanding the criteria of each section as well
as why we included some criteria and not others.
</p>
<p>The remaining categories each cover a different area worth considering
when evaluating DID Methods.</p>
<p>
<strong>Design</strong> addresses the method <em>as designed</em>. In other words, the output of the rulemaking:
what rules apply
to this DID method?
</p>
<p>
<strong>Alternatives</strong> address the availability and quality
of different implementation choices.
</p>
<p>
<strong>Adoption & Diversity</strong> covers questions related to how widely a
DID Method is used.
</p>
<p>
<strong>Security</strong> influences overall trust in the
ecosystem. Different DID methods offer different security guarantees, or guarantees of different strengths.
</p>
<p>
<strong>Privacy</strong> addresses the ability of a DID method to
ensure various privacy mechanisms. When DIDs are used as
identifiers for people, it becomes important to consider what
tools a DID method offers to operate at different levels
of privacy.
</p>
</section> <!-- categories -->
<section>
<h3>The architecture</h3>
<p>
When evaluating the governance of DID Methods, three potentially independent
layers should be considered: the specification, the network, and the registry.
</p>
<ul>
<li>
The <strong>specification</strong> is the governing document for the Method
that outlines how that particular Method implements the required and any
optional components of the DID Core specification [[DID-CORE]].
</li>
<li>
The <strong>network</strong> is the underlying communications layer, i.e., how
users of the Method communicate with others to invoke the operations of the
Method.
</li>
<li>
The <strong>registry</strong> is a given instance of recorded state changes,
managed according to the specification, using the communications channel.
</li>
</ul>
<p>
For Rulemaking, the criteria should be evaluated against all three of the
above layers.
</p>
<p>
For Operations, the criteria should be evaluated against the network and the
registry. The specification is taken as a given (it is the core output of
Rulemaking).
</p>
<p>
For Adoption, the criteria should be evaluated for each major software
component: wallet, resolver, and registry.
</p>
<p>
For Alternatives, the criteria should be evaluated against the particular DID
Method.
</p>
<p>
For the examples in the rest of this document we refer to a set of Methods
that are familiar to the authors and exhibit interesting characteristics for
<a>Evaluation</a>. See the tables below.
</p>
</section> <!-- architecture -->
<section>
<h3>DID Evaluations Cited</h3>
<p>The following sources provided example evaluations for this rubric. These are
not presented as objective fact, but rather as attempts by contributors to illustrate
noteworthy differences using their subjective judgement. Additional contributions
that flow into the registry should include a section that documents their source
with an additional row in the table.</p>
<table class="simple">
<tr><th>Relative ID</th><th>Citation</th><th>Note</th></tr>
<tr id="eval-1">
<td>eval-1</td>
<td>DID Method Rubric</td>
<td>Evaluations written by Joe Andrieu, for this rubric. Not explicitly funded.</td>
</tr>
<tr id="eval-2">
<td>eval-2</td>
<td>DID Method Rubric</td>
<td>Evaluations written by Daniel Hardman, to illustrate this rubric. Not explicitly funded.</td>
</tr>
<tr id="eval-3">
<td>eval-3</td>
<td>"A Rubric for Decentralization of DID Methods". Joe Andrieu, Shannon Appelcline,
Amy Guy, Joachim Lohkamp, Drummond Reed, Markus Sabadello, Oliver Terbu. <cite>Rebooting
the Web of Trust</cite>. Published 2020-07-20. <a
href="https://github.com/WebOfTrustInfo/rwot9-prague/blob/master/final-documents/decentralization-rubric.pdf">
https://github.com/WebOfTrustInfo/rwot9-prague/blob/master/final-documents/decentralization-rubric.pdf</a>.
</td>
<td>The original DID Rubric paper from Rebooting the Web of Trust. Not explicitly funded.</td>
</tr>
<tr id="eval-4">
<td>eval-4</td>
<td>VeresOne Rubric Evaluation</td>
<td>Funded by Digital Bazaar under DHS SVIP contract 70RSAT20T00000029.</td>
</tr>
</table>
</section>
<section>
<h3>Use Cases Referenced</h3>
<table class="simple">
<tr><th>Relative ID</th><th>Citation</th><th>Note</th></tr>
<tr id="usecase-1">
<td style="width:8em">usecase-1</td>
<td>Long term verifiable credentials</td>
<td>The use of DIDs as subject
identifiers for long term (life-long) verifiable credentials such as earned
academic degrees.</td>
</tr>
<tr id="usecase-2">
<td>usecase-2</td>
<td>User authentication</td>
<td>The use of DIDs for authenticating users
for access to system services.</td>
</tr>
<tr id="usecase-3">
<td>usecase-3</td>
<td>Verifiable software development</td>
<td>The signing of commits by
developers and their verification to ensure that source code in a particular
git repository is authentic.</td>
</tr>
</table>
</section>
<section>
<h3>Methods Considered</h3>
<table class="simple">
<tr>
<th>
Method
</th>
<th>
Specification
</th>
<th>
Network
</th>
<th>
Registry
</th>
</tr>
<tr id="did:peer">
<td>
did:peer
</td>
<td>
<a href="https://identity.foundation/peer-did-method-spec/index.html">Peer DID
Method Spec</a>
</td>
<td>
n/a (communications can flow over any agreeable channel)
</td>
<td>
Held by each peer
</td>
</tr>
<tr id="did:git">
<td>
did:git
</td>
<td>
<a href="https://github.com/dhuseby/did-git-spec/blob/master/did-git-spec.md">
DID git Spec</a>
</td>
<td>
<a href="https://git-scm.com/">git</a> (an open source version control system)
</td>
<td>
Any Method-compliant git repository
</td>
</tr>
<tr id="did:btcr">
<td>
did:btcr
</td>
<td>
<a href="https://w3c-ccg.github.io/didm-btcr/">DID Method BTCR</a>
</td>
<td>
Bitcoin
</td>
<td>
Bitcoin
</td>
</tr>
<tr id="did:sov">
<td>
did:sov
</td>
<td>
<a href="https://github.com/sovrin-foundation/sovrin/blob/master/spec/did-method-spec-template.html">
Sovrin DID Method Spec</a>
</td>
<td>
Hyperledger Indy
</td>
<td>
A particular instance of Hyperledger Indy
</td>
</tr>
<tr id="did:ethr">
<td>
did:ethr
</td>
<td>
<a href="https://github.com/decentralized-identity/ethr-did-resolver/blob/master/doc/did-method-spec.md">
ethr DID Method Spec</a>
</td>
<td>
Ethereum
</td>
<td>
Specific smart contracts for each network.
</td>
</tr>
<tr id="did:jolo">
<td>
did:jolo
</td>
<td>
<a href="https://github.com/jolocom/jolocom-did-driver/blob/master/jolocom-did-method-specification.md">
Jolocom DID Method Specification</a>
</td>
<td>
Ethereum
</td>
<td>
Specific smart contracts for different networks and subnetworks.
</td>
</tr>
<tr id="did:ipid">
<td>
did:ipid
</td>
<td>
<a href="https://did-ipid.github.io/ipid-did-method/">
IPID DID Method</a>
</td>
<td>
@johnnycrunch
</td>
<td>
DIDs persisted to IPFS.
</td>
</tr>
<tr id="did:web">
<td>
did:web
</td>
<td>
<a href="https://w3c-ccg.github.io/did-method-web/">
did:web Method Specification</a>
</td>
<td>
CCG work item
</td>
<td>
DIDs associated with control of a domain name (DNS).
</td>
</tr>
<tr id="did:indy">
<td>
did:indy
</td>
<td>
<a href="https://github.com/hyperledger/indy-did-method">
did:indy Method Spec</a>
</td>
<td>
Hyperledger Indy community
</td>
<td>
Supersedes did:sov to service all Indy-based ledgers.
</td>
</tr>
<tr id="did:iota">
<td>
did:iota
</td>
<td>
<a href="https://github.com/iotaledger/identity.rs/blob/main/docs/specs/iota_did_method_spec.md">
IOTA DID Method Spec</a>
</td>
<td>
IOTA Foundation
</td>
<td>
DIDs persisted to the IOTA tangle.
</td>
</tr>
<tr id="did:keri">
<td>
did:keri
</td>
<td>
<a href="https://identity.foundation/keri/did_methods/">
The did:keri Method 0.1</a>
</td>
<td>
Sam Smith, et al.
</td>
<td>
DIDs that can migrate from one blockchain to another, or use no blockchain at all.
</td>
</tr>
<tr id="did:hedera">
<td>
did:hedera
</td>
<td>
<a href="https://github.com/hashgraph/did-method/blob/master/did-method-specification.md">
Hedera Hashgraph DID Method Specification</a>
</td>
<td>
Hashgraph, Inc
</td>
<td>
DIDs written to a ledger that uses Hashgraph consensus as an alternative to traditional blockchain.
</td>
</tr>
<tr id="did:key">
<td>
did:key
</td>
<td>
<a href="https://w3c-ccg.github.io/did-method-key/">
The did:key Method v0.7</a>
</td>
<td>
CCG work item
</td>
<td>
Use a keypair as a DID.
</td>
</tr>
<tr id="did:twit">
<td>
did:twit
</td>
<td>
<a href="https://github.com/did-twit/did-twit/blob/master/spec/index.md">
Twit DID method specification</a>
</td>
<td>
Gabe Cohen
</td>
<td>
Publish a DID via Twitter feed.
</td>
</tr>
<tr id="did:pkh">
<td>
did:pkh
</td>
<td>
<a href="https://github.com/spruceid/ssi/blob/9ecc25cb4082709d146206b779cf8e8442f9eaf3/did-pkh/did-pkh-method-draft.md">
did:pkh Method Specification</a>
</td>
<td>
Spruce ID
</td>
<td>
Wrap an identifier (e.g., payment address) from one of many existing blockchains in did format.
</td>
</tr>
<tr id="did:trustbloc">
<td>
did:trustbloc
</td>
<td>
<a href="https://github.com/trustbloc/trustbloc-did-method/blob/main/docs/spec/trustbloc-did-method.md">
TrustBloc DID Method Specification 0.1</a>
</td>
<td>
Trustbloc
</td>
<td>
Persist DIDs via Sidetree wrapper around a permissioned ledger.
</td>
</tr>
<tr id="did:jlinc">
<td>
did:jlinc
</td>
<td>
<a href="https://did-spec.jlinc.org/">
JLINC DID Method Specification</a>
</td>
<td>
JLincLabs
</td>
<td>
Register DIDs over the JLinc protocol for sharing data with terms and conditions.
</td>
</tr>
</table>
</section>
<section>
<h3>Criteria and Criterion</h3>
<p>The term "criteria" is often treated as both a
singular and a plural noun. In the singular, we say "The most
important criteria is the buyer's age". This singular use of
criteria has been in use for over half a century (see <a
href="https://www.merriam-webster.com/dictionary/criterion#usage-1">https://www.merriam-webster.com/dictionary/criterion#usage-1</a>).
In the plural we say "That proposal doesn't meet all of the
criteria." Sometimes people use both in the same sentence: "Select
one criteria from the list of criteria."
</p>
<p>However, for formal use, "criteria" is more broadly accepted as
plural while "criterion" is singular. By this style rule, the last
sentence in the previous paragraph <em>should</em> be "Select one
criterion from the list of criteria." </p>
<p>As editors of a Note published by the World Wide Web Consortium,
we are torn. We would prefer to use
rigorous grammar and be consistent in doing so. At the same time,
we find attempts to enforce the formal rule sometimes leads people
to using the inverse, such as "Select a criteria from the list of
criterion." This is the exact opposite of our desired outcome.</p>
<p>In our own work with implementers and developers of the DID
Core specification, we have found that the singular "criteria" is
readily accepted and understood and leads to no confusion when
used, even alongside plural usage. Since the DID Method Rubric
is, at its core, a set of criteria with ample reason to refer to,
for example, "Criteria #23", we find the combined singular and
plural use is cleaner (just stick with "criteria"), less
confusing, and more aligned with common usage among our audience.</p>
<p>As such, throughout the DID Method Rubric, we use the term
"criteria" to refer to both singular instances of
criteria and plural sets of criteria.</p>
</section> <!-- criteria v criterion -->
</section> <!-- introduction -->
<section>
<h2>Registration Process</h2>
<p>This registry — the DID Method Rubric Registry — provides a public vehicle for publishing updated DID Method Rubric
criteria. In order to add or update a criteria, a submitter MUST submit a modification request for this registry, as a
pull request on the repository where this registry is hosted, where the modification request adheres to the following
policies:</p>
<section>
<h3>General Requirements</h3>
<ol>
<li>If there are copyright, trademark, or intellectual property
rights concerns, the addition and use MUST be authorized
in writing by the intellectual property rights holder under a F/RAND
license. For example, criteria that use trademarked brand names, use the titles or excerpts from copyrighted works, or require patented technology to perform the evaluation.
</li>
<li>Additions MUST NOT create unreasonable legal, security,
moral, or privacy issues that may result in direct or
indirect harm to others, including violations of the W3C Code Of
Ethics And Professional Conduct <a href="https://www.w3.org/Consortium/cepc/">
https://www.w3.org/Consortium/cepc/</a>.
Examples of unacceptable additions include
any containing hate speech, unprofessional or
unethical attacks, proprietary information, and any personal data or
personally identifiable information (excepting
identification details of the submitter).
</li>
<li>All criteria MUST be uniquely identified and versioned to ensure
permanent linkability with version trackability. Those
identifiers MUST be present as an `href` anchor in the HTML for the
criteria itself. See the sections below on identifiers
and versioning. Subcomponents, such as questions, responses,
relevance, and examples, are not separately versioned; any
change to a subcomponent triggers an appropriate version change to
its primary component.
</li>
<li>Use cases, methods, and evaluations cited within a criteria MUST
include a local link to a full citation in
the relevant citation section: Use Cases Referenced,
Methods Evaluated, and Evaluations Cited, respectively.
</li>
<li>Rubric criteria and associated metadata in the DID Method
Rubric Registry MAY be updated or removed at the editors’
discretion, including the addition or removal of categories of
metadata such as <em>Use Cases Referenced</em> or <em>Implementations
of Note</em>.</li>
</ol>
</section>
<section>
<h3>Component Requirements</h3>
<p>The primary components managed by this registry are criteria for
evaluating DID Methods, with as many as eight
subcomponents: name, id, version, question, responses, relevance,
examples, and, optionally, a source. In addition, the
DID Method Rubric maintains a list of cited references: DID methods,
use cases, and evaluations. The criteria are
independently identified and versioned (see those sections for
details) while references cited in the criteria
themselves link to their full citation in the appropriate list.</p>
<section>
<h4>Criteria</h4>
<ol>
<li>Updates to criteria MUST follow the following versioning
guidelines.
</li>
<li>Proposed criteria without at least three independent example
evaluations MUST have the word “PROVISIONAL” as the last
term in its name. Once at least three independent example
evaluations exist, the word “PROVISIONAL” SHOULD be removed.
</li>
<li>Accepted (non-provisional) criteria MUST have at least three example evaluations, and generally SHOULD have no more example
evaluations than there are possible responses to its question. Each example should distinctly illustrate one possible response to the
criteria's question.
</li>
<li>To be considered, criteria MUST have the following subcomponents:
<ol type="a">
<li>
Name
</li>
<li>
Identifier
</li>
<li>
Version
</li>
<li>
Question
</li>
<li>
Possible Responses
</li>
<li>
Relevance
</li>
<li>
Example Evaluations
</li>
</ol>
</li>
<li>
Additionally, criteria MAY have the following optional subcomponents:
<ol type="a">
<li>
Source
</li>
</ol>
</li>
</ol>
</section> <!-- criteria -->
<section>
<h4>Criteria.Name</h4>
<p>Each criteria needs a human-friendly, short, descriptive name
that captures the essence of the criteria as concisely as
possible.</p>
</section> <!-- Criteria.Name -->
<section>
<h4>Criteria.Id</h4>
<p>The criteria id must be explicit, unique, and persistent. See the section on Identifiers for details.
</p>
</section> <!-- Criteria.Id -->
<section>
<h4>Criteria.Version</h4>
<p>The criteria version must increment, as appropriate, any time
the criteria, or any of its subcomponents, is updated. See
the section on <a href="#versioning">Versioning</a> for details.</p>
</section> <!-- Criteria.Version -->
<section>
<h4>Criteria.Question</h4>
<p>This subcomponent presents the key question for evaluating the
criteria. The question MUST be a single inquiry that gets to
the heart of the criteria. It should be reasonably clear to a
competent professional skilled in the art how to resolve
the question into one of the possible responses.</p>
</section> <!-- Criteria.Question -->
<section>
<h4>Criteria.Responses</h4>
<p>This subcomponent lists the expected responses to the question.</p>
<ol>
<li>
Responses are either labeled or open ended.
</li>
<li>
Labeled responses MUST include a label and a meaning for that
label.
</li>
<li>
Labels for a response MUST start with “A” in each criteria and progress sequentially through the English alphabet.
</li>
<li>
Open ended responses specify how an evaluator should fill in the response.
</li>
</ol>
</section> <!-- Criteria.Responses -->
<section>
<h4>Criteria.Relevance</h4>
<p>This subcomponent explains why this criteria is useful. Readers
should be able to understand the extent to which this
particular criteria is applicable to their situation.
</p>
</section> <!-- Criteria.Relevance -->
<section>
<h4>Criteria.Examples</h4>
<p>This section lists example evaluations of different DID methods
using this criteria, presented as a table for easy
correlation between evaluations of different methods using the
same criteria.
</p>
<ol>
<li>Each example must have the following entries, each as its own column(s) in the examples table.
<ol type="a">
<li>Method</li>
<li>Responses (one column for each element)</li>
<li>Notes</li>
</ol>
</li>
<li>
Some criteria MAY require an additional "Referent" entry, in its
own column in the examples table.
</li>
<li>Example Evaluations for a given criteria SHOULD highlight
distinct responses. That is, each evaluation should have a
different set of responses from the other example evaluations.
The point of the examples is to illustrate how different
DID methods score differently.</li>
<li>The editors will curate the list of proposed examples to
provide a best effort illustration of each criteria, with
consideration to highlighting a broad corpus of methods.</li>
<li>Criteria with fewer than 3 example evaluations MUST be
marked “PROVISIONAL”</li>
<li>Criteria SHOULD have no more example evaluations than there
are possible responses.</li>
</ol>
<section>
<h4>Criteria.Examples.Method</h4>
<ol>
<li>Example evaluations MUST specify the evaluated method in
the first column, using a relative link to the method’s entry
in the Methods Evaluated section. The entry in the Methods
Evaluated table MUST conform to the requirements in that
section.
</li>
<li>The title of this column MUST be “Method”.
</li>
<li>The Method entry MUST uniquely identify a specific
DID Method specification suitable for evaluation,
including any specified variant such as testnet, mainnet,
etc.
</li>
</ol>