Kiln » Kiln Extensions
Pushed to 2 repositories · Contained in tip

bump extensions to Kiln 2.5.130

Changeset 03426ac40250

Parent 08a3cb8d3d4a

by Benjamin Pollack <benjamin@fogcreek.com>

Changes to 11 files · Showing diff from parent 08a3cb8d3d4a
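Nearly all of the changes in this diff are mechanical renames: underscore-separated helper names in the kbfiles extension are collapsed to single words (for example bfutil.copy_to_cache becomes bfutil.copytocache, basestore._open_store becomes basestore._openstore, and bfutil.is_standin/split_standin become isstandin/splitstandin), with no change in behaviour. As a rough illustration only, a rename of this kind could be scripted along the following lines; the RENAMES table is a hypothetical subset of the names touched here, and the script is not part of the changeset.

# Illustrative sketch only -- not part of this changeset.
# Rewrites whole-word occurrences of the old helper names to their new forms.
import re
import sys

RENAMES = {
    'copy_to_cache': 'copytocache',
    '_open_store': '_openstore',
    '_store_provider': '_storeprovider',
    'is_standin': 'isstandin',
    'split_standin': 'splitstandin',
    'update_bfiles': 'updatebfiles',
}

_pattern = re.compile(r'\b(%s)\b' % '|'.join(map(re.escape, RENAMES)))

def rename_identifiers(source):
    """Replace each old identifier with its renamed form."""
    return _pattern.sub(lambda m: RENAMES[m.group(1)], source)

if __name__ == '__main__':
    for path in sys.argv[1:]:
        with open(path) as fd:
            text = fd.read()
        with open(path, 'w') as fd:
            fd.write(rename_identifiers(text))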

 
@@ -89,7 +89,7 @@
         if os.path.exists(outfilename): # for windows
             os.remove(outfilename)
         os.rename(tmpfilename, outfilename)
-        bfutil.copy_to_cache(self.repo, self.repo['.'].node(), filename, True)
+        bfutil.copytocache(self.repo, self.repo['.'].node(), filename, True)
         success.append((filename, hhash))

     ui.progress(_('getting bfiles'), None)
@@ -143,7 +143,7 @@

 import localstore, httpstore

-_store_provider = {
+_storeprovider = {
     'file': (localstore, 'localstore'),
     'http': (httpstore, 'httpstore'),
     'https': (httpstore, 'httpstore'),
@@ -154,7 +154,7 @@
 # During clone this function is passed the src's ui object
 # but it needs the dest's ui object so it can read out of
 # the config file. Use repo.ui instead.
-def _open_store(repo, path=None, put=False):
+def _openstore(repo, path=None, put=False):
     ui = repo.ui
     if not path:
         path = ui.expandpath('default-push', 'default')
@@ -176,7 +176,7 @@
     scheme = match.group(1)

     try:
-        (mod, klass) = _store_provider[scheme]
+        (mod, klass) = _storeprovider[scheme]
     except KeyError:
         raise util.Abort(_('unsupported URL scheme %r') % scheme)
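The last three hunks above show how the extension picks a store backend: _storeprovider maps a URL scheme to a (module, class name) pair, and _openstore resolves the remote path's scheme against that table, aborting on anything it does not recognize. A minimal standalone sketch of the same dispatch idea follows; LocalStore and HttpStore are hypothetical placeholders here, not the extension's real localstore/httpstore classes.

# Minimal sketch of the scheme-based store dispatch shown above.
# LocalStore/HttpStore are illustrative placeholders.
import re

class LocalStore(object):
    def __init__(self, path):
        self.path = path

class HttpStore(object):
    def __init__(self, path):
        self.path = path

_storeprovider = {
    'file': LocalStore,
    'http': HttpStore,
    'https': HttpStore,
}

_scheme_re = re.compile(r'^([a-zA-Z0-9.+-]+)://')

def openstore(path):
    """Pick a store implementation from the URL scheme, defaulting to 'file'."""
    match = _scheme_re.match(path)
    scheme = match.group(1) if match else 'file'
    try:
        factory = _storeprovider[scheme]
    except KeyError:
        raise ValueError('unsupported URL scheme %r' % scheme)
    return factory(path)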
 
@@ -28,7 +28,7 @@
     tobfile = True
     size = opts['size']
     if not size:
-        size = ui.config(bfutil.long_name, 'size', default=None)
+        size = ui.config(bfutil.longname, 'size', default=None)
     try:
         size = int(size)
     except ValueError:
@@ -73,7 +73,7 @@
     bfiles = set()
     normalfiles = set()
     if not pats:
-        pats = ui.config(bfutil.long_name, 'patterns', default=())
+        pats = ui.config(bfutil.longname, 'patterns', default=())
     if pats:
         pats = pats.split(' ')
     if pats:
@@ -88,8 +88,8 @@
                            bfiles, normalfiles, matcher, size, bfiletohash)
     ui.progress(_('converting revisions'), None)

-    if os.path.exists(rdst.wjoin(bfutil.short_name)):
-        shutil.rmtree(rdst.wjoin(bfutil.short_name))
+    if os.path.exists(rdst.wjoin(bfutil.shortname)):
+        shutil.rmtree(rdst.wjoin(bfutil.shortname))

     for f in bfiletohash.keys():
         if os.path.isfile(rdst.wjoin(f)):
@@ -146,10 +146,10 @@
                 raise IOError()
             renamed = fctx.renamed()
             if renamed:
-                renamed = bfutil.split_standin(renamed[0])
+                renamed = bfutil.splitstandin(renamed[0])

             hash = fctx.data().strip()
-            path = bfutil.find_file(rsrc, hash)
+            path = bfutil.findfile(rsrc, hash)
             ### TODO: What if the file is not cached?
             data = ''
             with open(path, 'rb') as fd:
@@ -176,8 +176,8 @@

     dstfiles = []
     for file in files:
-        if bfutil.is_standin(file):
-            dstfiles.append(bfutil.split_standin(file))
+        if bfutil.isstandin(file):
+            dstfiles.append(bfutil.splitstandin(file))
         else:
             dstfiles.append(file)
     # Commit
@@ -214,7 +214,7 @@
     dstfiles = []
     for f in files:
         if f not in bfiles and f not in normalfiles:
-            isbfile = _is_bfile(f, ctx, matcher, size)
+            isbfile = _isbfile(f, ctx, matcher, size)
             # If this file was renamed or copied then copy
             # the bfileness of its predecessor
             if f in ctx.manifest():
@@ -241,7 +241,7 @@

                 # bfile was modified, update standins
                 fullpath = rdst.wjoin(f)
-                bfutil.create_dir(os.path.dirname(fullpath))
+                bfutil.createdir(os.path.dirname(fullpath))
                 m = util.sha1('')
                 m.update(ctx[f].data())
                 hash = m.hexdigest()
@@ -249,18 +249,18 @@
                 with open(fullpath, 'wb') as fd:
                     fd.write(ctx[f].data())
                 executable = 'x' in ctx[f].flags()
-                os.chmod(fullpath, bfutil.get_mode(executable))
-                bfutil.write_standin(rdst, bfutil.standin(f), hash, executable)
+                os.chmod(fullpath, bfutil.getmode(executable))
+                bfutil.writestandin(rdst, bfutil.standin(f), hash, executable)
                 bfiletohash[f] = hash
             else:
                 # normal file
                 dstfiles.append(f)

     def getfilectx(repo, memctx, f):
-        if bfutil.is_standin(f):
+        if bfutil.isstandin(f):
             # if the file isn't in the manifest then it was removed
             # or renamed, raise IOError to indicate this
-            srcfname = bfutil.split_standin(f)
+            srcfname = bfutil.splitstandin(f)
             try:
                 fctx = ctx.filectx(srcfname)
             except error.LookupError:
@@ -299,7 +299,7 @@
     rdst.dirstate.setparents(ret)
     revmap[ctx.node()] = rdst.changelog.tip()

-def _is_bfile(file, ctx, matcher, size):
+def _isbfile(file, ctx, matcher, size):
     '''
     A file is a bfile if it matches a pattern or is over
     the given size.
@@ -314,7 +314,7 @@
     except error.LookupError:
         return False

-def upload_bfiles(ui, rsrc, rdst, files):
+def uploadbfiles(ui, rsrc, rdst, files):
     '''upload big files to the central store'''

     if not files:
@@ -325,7 +325,7 @@
     if not rdst.path.startswith('http'):
         return

-    store = basestore._open_store(rsrc, rdst.path, put=True)
+    store = basestore._openstore(rsrc, rdst.path, put=True)

     at = 0
     for hash in files:
@@ -333,7 +333,7 @@
         if store.exists(hash):
             at += 1
             continue
-        source = bfutil.find_file(rsrc, hash)
+        source = bfutil.findfile(rsrc, hash)
         if not source:
             raise util.Abort(_('Missing bfile %s needs to be uploaded') % hash)
         # XXX check for errors here
@@ -341,7 +341,7 @@
         at += 1
     ui.progress('uploading bfiles', None)

-def verify_bfiles(ui, repo, all=False, contents=False):
+def verifybfiles(ui, repo, all=False, contents=False):
     '''Verify that every big file revision in the current changeset
     exists in the central store. With --contents, also verify that
     the contents of each big file revision are correct (SHA-1 hash
@@ -353,22 +353,22 @@
     else:
         revs = ['.']

-    store = basestore._open_store(repo)
+    store = basestore._openstore(repo)
     return store.verify(revs, contents=contents)

-def revert_bfiles(ui, repo, file_list=None):
+def revertbfiles(ui, repo, filelist=None):
     wlock = repo.wlock()
     try:
-        bfdirstate = bfutil.open_bfdirstate(ui, repo)
+        bfdirstate = bfutil.openbfdirstate(ui, repo)
         s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False, False, False)
         (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

-        bfiles = bfutil.list_bfiles(repo)
+        bfiles = bfutil.listbfiles(repo)
         toget = []
         at = 0
         updated = 0
         for bfile in bfiles:
-            if file_list == None or bfile in file_list:
+            if filelist == None or bfile in filelist:
                 if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                     bfdirstate.remove(bfile)
                     continue
@@ -378,7 +378,7 @@
                 expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
                 mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
                 if not os.path.exists(repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(repo.wjoin(bfile)):
-                    path = bfutil.find_file(repo, expectedhash)
+                    path = bfutil.findfile(repo, expectedhash)
                     if path is None:
                         toget.append((bfile, expectedhash))
                     else:
@@ -403,7 +403,7 @@
                 bfutil.dirstate_normaldirty(bfdirstate, bfutil.unixpath(bfile))

         if toget:
-            store = basestore._open_store(repo)
+            store = basestore._openstore(repo)
             success, missing = store.get(toget)
         else:
             success, missing = [], []
@@ -421,7 +421,7 @@

         removed = 0
         for bfile in bfdirstate:
-            if file_list == None or bfile in file_list:
+            if filelist == None or bfile in filelist:
                 if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):
                     if os.path.exists(repo.wjoin(bfile)):
                         os.unlink(repo.wjoin(bfile))
@@ -449,14 +449,14 @@
     finally:
         wlock.release()

-def update_bfiles(ui, repo):
+def updatebfiles(ui, repo):
     wlock = repo.wlock()
     try:
-        bfdirstate = bfutil.open_bfdirstate(ui, repo)
+        bfdirstate = bfutil.openbfdirstate(ui, repo)
         s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False, False, False)
         (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

-        bfiles = bfutil.list_bfiles(repo)
+        bfiles = bfutil.listbfiles(repo)
         toget = []
         at = 0
         updated = 0
@@ -476,7 +476,7 @@
             expectedhash = repo[None][bfutil.standin(bfile)].data().strip()
             mode = os.stat(repo.wjoin(bfutil.standin(bfile))).st_mode
             if not os.path.exists(repo.wjoin(bfile)) or expectedhash != bfutil.hashfile(repo.wjoin(bfile)):
-                path = bfutil.find_file(repo, expectedhash)
+                path = bfutil.findfile(repo, expectedhash)
                 if not path:
                     toget.append((bfile, expectedhash))
                 else:
@@ -491,7 +491,7 @@
                 bfdirstate.normal(bfutil.unixpath(bfile))

         if toget:
-            store = basestore._open_store(repo)
+            store = basestore._openstore(repo)
             (success, missing) = store.get(toget)
         else:
             success, missing = [],[]
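The _isbfile hunk above spells out the classification rule used during conversion: a file is treated as a big file when it matches one of the configured patterns or is at least as large as the configured size threshold (the add override later in this changeset applies the same size test, bfsize*1024*1024). A minimal sketch of that rule outside Mercurial, with illustrative names, assuming a megabyte threshold:

# Minimal sketch of the "is this a big file?" test described in _isbfile above.
# isbigfile, patterns and size_mb are illustrative, not the extension's API.
import fnmatch
import os

def isbigfile(path, patterns, size_mb):
    """Return True if path matches any pattern or is at least size_mb MB."""
    if any(fnmatch.fnmatch(path, pat) for pat in patterns):
        return True
    try:
        return os.path.getsize(path) >= size_mb * 1024 * 1024
    except OSError:
        # A missing file cannot be classified by size.
        return False

# Example: treat everything over 10 MB, plus all ISO images, as big files.
# isbigfile('dist/app.iso', ['*.iso'], 10)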
 
 '''Setup code for bfiles extension: reposetup(), uisetup().'''    import os  import types  import copy  import re    from mercurial import hg, extensions, commands, util, context, cmdutil, \ - match as match_, filemerge, node, archival, httprepo, error, manifest + match as match_, filemerge, node, archival, httprepo, error, \ + manifest, merge  from mercurial.i18n import _  from mercurial.node import hex  from hgext import rebase  import bfutil, bfcommands    try:   from mercurial import scmutil  except ImportError:   pass    # -- Wrappers: modify existing commands --------------------------------    def reposetup(ui, repo):   # add a kbfiles-specific querystring argument to remote requests, so kiln can reject   # operations on a kbfiles-enabled remote repo from a non-kbfiles local repo.   if issubclass(repo.__class__, httprepo.httprepository):   class kbfilesrepo(repo.__class__):   # The function we want to override is do_cmd for Mercurial <= 1.6   # and _callstream for Mercurial > 1.6. Wrap whichever one we can find.   if hasattr(repo.__class__, 'do_cmd'):   def do_cmd(self, cmd, **args):   args['kbfiles'] = 'true'   return super(kbfilesrepo, self).do_cmd(cmd, **args)   if hasattr(repo.__class__, '_callstream'):   def _callstream(self, cmd, **args):   args['kbfiles'] = 'true'   return super(kbfilesrepo, self)._callstream(cmd, **args)   repo.__class__ = kbfilesrepo     # bfiles doesn't support non-local repositories -- get out quick in   # such a case   if not repo.local():   return     for name in ('status', 'commitctx', 'commit', 'push'):   method = getattr(repo, name)   #if not (isinstance(method, types.MethodType) and   # method.im_func is repo.__class__.commitctx.im_func):   if isinstance(method, types.FunctionType) and method.func_name == 'wrap':   ui.warn(_('kbfiles: repo method %r appears to have already been '   'wrapped by another extension: '   'kbfiles may behave incorrectly\n')   % name)     class bfiles_repo(repo.__class__):   bfstatus = False   def status_nobfiles(self, *args, **kwargs):   return super(bfiles_repo, self).status(*args, **kwargs)     # When bfstatus is set, return a context that gives the names of bfiles   # instead of their corresponding standins and identifies the bfiles as   # always binary, regardless of their actual contents.   def __getitem__(self, changeid):   ctx = super(bfiles_repo, self).__getitem__(changeid)   if self.bfstatus:   class bfiles_manifestdict(manifest.manifestdict):   def __contains__(self, filename):   if super(bfiles_manifestdict, self).__contains__(filename):   return True   return super(bfiles_manifestdict, self).__contains__('.kbf/' + filename)   class bfiles_ctx(ctx.__class__):   def files(self):   filenames = super(bfiles_ctx, self).files()   return [re.sub(r'^\.kbf/', '', filename) for filename in filenames]   def manifest(self):   man1 = super(bfiles_ctx, self).manifest()   man1.__class__ = bfiles_manifestdict   return man1   def filectx(self, path, fileid=None, filelog=None):   try:   result = super(bfiles_ctx, self).filectx(path, fileid, filelog)   except error.LookupError:   # Adding a null character will cause Mercurial to identify this   # as a binary file.   result = super(bfiles_ctx, self).filectx('.kbf/' + path, fileid, filelog)   olddata = result.data   result.data = lambda: olddata() + '\0'   return result   ctx.__class__ = bfiles_ctx   return ctx     # Figure out the status of big files and insert them into the   # appropriate list in the result. Also removes standin files from   # the listing. 
This function reverts to the original status if   # self.bfstatus is False   def status(self, node1='.', node2=None, match=None, ignored=False, clean=False, unknown=False, subrepos=None):   listignored, listclean, listunknown = ignored, clean, unknown   if not self.bfstatus:   try:   return super(bfiles_repo, self).status(node1, node2, match, listignored, listclean, listunknown, subrepos)   except TypeError:   return super(bfiles_repo, self).status(node1, node2, match, listignored, listclean, listunknown)   else:   # some calls in this function rely on the old version of status   self.bfstatus = False   if isinstance(node1, context.changectx):   ctx1 = node1   else:   ctx1 = repo[node1]   if isinstance(node2, context.changectx):   ctx2 = node2   else:   ctx2 = repo[node2]   working = ctx2.rev() is None   parentworking = working and ctx1 == self['.']     def inctx(file, ctx):   try:   if ctx.rev() is None:   return file in ctx.manifest()   ctx[file]   return True   except:   return False     # create a copy of match that matches standins instead of bfiles   # if matcher not set then it is the always matcher so overwrite that   if match is None:   match = match_.always(self.root, self.getcwd())     def tostandin(file):   if inctx(bfutil.standin(file), ctx2):   return bfutil.standin(file)   return file     m = copy.copy(match)   m._files = [tostandin(f) for f in m._files]     # get ignored clean and unknown but remove them later if they were not asked for   try:   result = super(bfiles_repo, self).status(node1, node2, m, True, True, True, subrepos)   except TypeError:   result = super(bfiles_repo, self).status(node1, node2, m, True, True, True)   if working:   # Hold the wlock while we read bfiles and update the bfdirstate   wlock = repo.wlock()   try:   # Any non bfiles that were explicitly listed must be taken out or   # bfdirstate.status will report an error. The status of these files   # was already computed using super's status. - bfdirstate = bfutil.open_bfdirstate(ui, self) + bfdirstate = bfutil.openbfdirstate(ui, self)   match._files = [f for f in match._files if f in bfdirstate]   s = bfdirstate.status(match, [], listignored, listclean, listunknown)   (unsure, modified, added, removed, missing, unknown, ignored, clean) = s   if parentworking:   for bfile in unsure:   if ctx1[bfutil.standin(bfile)].data().strip() != bfutil.hashfile(self.wjoin(bfile)):   modified.append(bfile)   else:   clean.append(bfile)   bfdirstate.normal(bfutil.unixpath(bfile))   bfdirstate.write()   else:   tocheck = unsure + modified + added + clean   modified, added, clean = [], [], []     for bfile in tocheck:   standin = bfutil.standin(bfile)   if inctx(standin, ctx1):   if ctx1[standin].data().strip() != bfutil.hashfile(self.wjoin(bfile)):   modified.append(bfile)   else:   clean.append(bfile)   else:   added.append(bfile)   finally:   wlock.release()     for standin in ctx1.manifest(): - if not bfutil.is_standin(standin): + if not bfutil.isstandin(standin):   continue - bfile = bfutil.split_standin(standin) + bfile = bfutil.splitstandin(standin)   if not match(bfile):   continue   if bfile not in bfdirstate:   removed.append(bfile)   # Handle unknown and ignored differently   bfiles = (modified, added, removed, missing, [], [], clean)   result = list(result)   # Unknown files - result[4] = [f for f in unknown if repo.dirstate[f] == '?' and not bfutil.is_standin(f)] + result[4] = [f for f in unknown if repo.dirstate[f] == '?' 
and not bfutil.isstandin(f)]   # Ignored files must be ignored by both the dirstate and bfdirstate   result[5] = set(ignored).intersection(set(result[5]))   # combine normal files and bfiles - normals = [[fn for fn in filelist if not bfutil.is_standin(fn)] for filelist in result] + normals = [[fn for fn in filelist if not bfutil.isstandin(fn)] for filelist in result]   result = [sorted(list1 + list2) for (list1, list2) in zip(normals, bfiles)]   else:   def toname(f): - if bfutil.is_standin(f): - return bfutil.split_standin(f) + if bfutil.isstandin(f): + return bfutil.splitstandin(f)   return f   result = [[toname(f) for f in items] for items in result]     if not listunknown:   result[4] = []   if not listignored:   result[5] = []   if not listclean:   result[6] = []   self.bfstatus = True   return result     # This call happens after a commit has occurred. Copy all of the bfiles   # into the cache   def commitctx(self, *args, **kwargs):   node = super(bfiles_repo, self).commitctx(*args, **kwargs)   ctx = self[node]   for filename in ctx.files(): - if bfutil.is_standin(filename) and filename in ctx.manifest(): - realfile = bfutil.split_standin(filename) - bfutil.copy_to_cache(self, ctx.node(), realfile) + if bfutil.isstandin(filename) and filename in ctx.manifest(): + realfile = bfutil.splitstandin(filename) + bfutil.copytocache(self, ctx.node(), realfile)     return node     # This call happens before a commit has occurred. The bfile standins   # have not had their contents updated (to reflect the hash of their bfile).   # Do that here.   def commit(self, text="", user=None, date=None, match=None, force=False,   editor=False, extra={}):   orig = super(bfiles_repo, self).commit     wlock = repo.wlock()   try:   if getattr(repo, "_isrebasing", False):   # We have to take the time to pull down the new bfiles now. Otherwise   # if we are rebasing, any bfiles that were modified in the changesets we   # are rebasing on top of get overwritten either by the rebase or in the   # first commit after the rebase. - bfcommands.update_bfiles(repo.ui, repo) + bfcommands.updatebfiles(repo.ui, repo)   # Case 1: user calls commit with no specific files or   # include/exclude patterns: refresh and commit everything.   if (match is None) or (not match.anypats() and not match.files()): - bfiles = bfutil.list_bfiles(self) - bfdirstate = bfutil.open_bfdirstate(ui, self) + bfiles = bfutil.listbfiles(self) + bfdirstate = bfutil.openbfdirstate(ui, self)   # this only loops through bfiles that exist (not removed/renamed)   for bfile in bfiles:   if os.path.exists(self.wjoin(bfutil.standin(bfile))):   # this handles the case where a rebase is being performed and the   # working copy is not updated yet.   if os.path.exists(self.wjoin(bfile)): - bfutil.update_standin(self, bfutil.standin(bfile)) + bfutil.updatestandin(self, bfutil.standin(bfile))   bfdirstate.normal(bfutil.unixpath(bfile))   for bfile in bfdirstate:   if not os.path.exists(repo.wjoin(bfutil.standin(bfile))):   path = bfutil.unixpath(bfile)   try:   # Mercurial >= 1.9   bfdirstate.drop(path)   except AttributeError:   # Mercurial <= 1.8   bfdirstate.forget(path)   bfdirstate.write()     return orig(text=text, user=user, date=date, match=match,   force=force, editor=editor, extra=extra)     for file in match.files(): - if bfutil.is_standin(file): + if bfutil.isstandin(file):   raise util.Abort("Don't commit bfile standin. Commit bfile.")     # Case 2: user calls commit with specified patterns: refresh any   # matching big files. 
- smatcher = bfutil.compose_standin_matcher(self, match) + smatcher = bfutil.composestandinmatcher(self, match)   standins = bfutil.dirstate_walk(self.dirstate, smatcher)     # No matching big files: get out of the way and pass control to   # the usual commit() method.   if not standins:   return orig(text=text, user=user, date=date, match=match,   force=force, editor=editor, extra=extra)     # Refresh all matching big files. It's possible that the commit   # will end up failing, in which case the big files will stay   # refreshed. No harm done: the user modified them and asked to   # commit them, so sooner or later we're going to refresh the   # standins. Might as well leave them refreshed. - bfdirstate = bfutil.open_bfdirstate(ui, self) + bfdirstate = bfutil.openbfdirstate(ui, self)   for standin in standins: - bfile = bfutil.split_standin(standin) + bfile = bfutil.splitstandin(standin)   if bfdirstate[bfile] is not 'r': - bfutil.update_standin(self, standin) + bfutil.updatestandin(self, standin)   bfdirstate.normal(bfutil.unixpath(bfile))   else:   path = bfutil.unixpath(bfile)   try:   # Mercurial >= 1.9   bfdirstate.drop(path)   except AttributeError:   # Mercurial <= 1.8   bfdirstate.forget(path)   bfdirstate.write()     # Cook up a new matcher that only matches regular files or   # standins corresponding to the big files requested by the user.   # Have to modify _files to prevent commit() from complaining   # "not tracked" for big files. - bfiles = bfutil.list_bfiles(repo) + bfiles = bfutil.listbfiles(repo)   match = copy.copy(match)   orig_matchfn = match.matchfn     # Check both the list of bfiles and the list of standins because if a bfile was removed, it   # won't be in the list of bfiles at this point   match._files += sorted(standins)     actualfiles = []   for f in match._files:   fstandin = bfutil.standin(f)     # Ignore known bfiles and standins   if f in bfiles or fstandin in standins:   continue     # Append directory separator to avoid collisions   if not fstandin.endswith(os.sep):   fstandin += os.sep     # Prevalidate matching standin directories   if any(st for st in match._files if st.startswith(fstandin)):   continue   actualfiles.append(f)   match._files = actualfiles     def matchfn(f):   if orig_matchfn(f):   return f not in bfiles   else:   return f in standins     match.matchfn = matchfn   return orig(text=text, user=user, date=date, match=match,   force=force, editor=editor, extra=extra)   finally:   wlock.release()     def push(self, remote, force=False, revs=None, newbranch=False):   o = bfutil.findoutgoing(repo, remote, force)   if o:   toupload = set()   o = repo.changelog.nodesbetween(o, revs)[0]   for n in o:   parents = [p for p in repo.changelog.parents(n) if p != node.nullid]   ctx = repo[n]   files = set(ctx.files())   if len(parents) == 2:   mc = ctx.manifest()   mp1 = ctx.parents()[0].manifest()   mp2 = ctx.parents()[1].manifest()   for f in mp1:   if f not in mc:   files.add(f)   for f in mp2:   if f not in mc:   files.add(f)   for f in mc:   if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):   files.add(f)   - toupload = toupload.union(set([ctx[f].data().strip() for f in files if bfutil.is_standin(f) and f in ctx])) - bfcommands.upload_bfiles(ui, self, remote, toupload) + toupload = toupload.union(set([ctx[f].data().strip() for f in files if bfutil.isstandin(f) and f in ctx])) + bfcommands.uploadbfiles(ui, self, remote, toupload)   # Mercurial >= 1.6 takes the newbranch argument, try that first.   
try:   return super(bfiles_repo, self).push(remote, force, revs, newbranch)   except TypeError:   return super(bfiles_repo, self).push(remote, force, revs)     repo.__class__ = bfiles_repo    # Add works by going through the files that the user wanted to add  # and checking if they should be added as bfiles. Then making a new  # matcher which matches only the normal files and running the original  # version of add.  def override_add(orig, ui, repo, *pats, **opts):   bf = opts.pop('bf', None)     bfsize = opts.pop('bfsize', None)   if bfsize:   try:   bfsize = int(bfsize)   except ValueError:   raise util.Abort(_('size must be an integer, was %s\n') % bfsize)   else: - if os.path.exists(repo.wjoin(bfutil.short_name)): - bfsize = ui.config(bfutil.long_name, 'size', default='10') + if os.path.exists(repo.wjoin(bfutil.shortname)): + bfsize = ui.config(bfutil.longname, 'size', default='10')   if bfsize:   try:   bfsize = int(bfsize)   except ValueError:   raise util.Abort(_('bfiles.size must be integer, was %s\n') % bfsize)     bfmatcher = None - if os.path.exists(repo.wjoin(bfutil.short_name)): - bfpats = ui.config(bfutil.long_name, 'patterns', default=()) + if os.path.exists(repo.wjoin(bfutil.shortname)): + bfpats = ui.config(bfutil.longname, 'patterns', default=())   if bfpats:   bfpats = bfpats.split(' ')   bfmatcher = match_.match(repo.root, '', list(bfpats))     bfnames = []   try:   # Mercurial >= 1.9   m = scmutil.match(repo[None], pats, opts)   except ImportError:   # Mercurial <= 1.8   m = cmdutil.match(repo, pats, opts)   m.bad = lambda x,y: None   wctx = repo[None]   for f in repo.walk(m):   exact = m.exact(f)   bfile = bfutil.standin(f) in wctx   nfile = f in wctx     if exact and bfile:   ui.warn(_('%s already a bfile\n') % f)   continue   # Don't warn the user when they attempt to add a normal tracked file. The normal add code   # will do that for us.   
if exact and nfile:   continue   if exact or (not bfile and not nfile):   if bf or (bfsize and os.path.getsize(repo.wjoin(f)) >= bfsize*1024*1024) \   or (bfmatcher and bfmatcher(f)):   bfnames.append(f)   if ui.verbose or not exact:   ui.status(_('adding %s as bfile\n') % m.rel(f))     bad = []   standins = []     # Need to lock otherwise there could be a race condition inbetween when standins are created   # and added to the repo   wlock = repo.wlock()   try:   if not opts.get('dry_run'): - bfdirstate = bfutil.open_bfdirstate(ui, repo) + bfdirstate = bfutil.openbfdirstate(ui, repo)   for f in bfnames:   standinname = bfutil.standin(f) - bfutil.write_standin(repo, standinname, hash='', executable=bfutil.get_executable(repo.wjoin(f))) + bfutil.writestandin(repo, standinname, hash='', executable=bfutil.getexecutable(repo.wjoin(f)))   standins.append(standinname)   if bfdirstate[bfutil.unixpath(f)] == 'r':   bfdirstate.normallookup(bfutil.unixpath(f))   else:   bfdirstate.add(bfutil.unixpath(f))   bfdirstate.write() - bad += [bfutil.split_standin(f) for f in bfutil.repo_add(repo, standins) if f in m.files()] + bad += [bfutil.splitstandin(f) for f in bfutil.repo_add(repo, standins) if f in m.files()]   finally:   wlock.release()     try:   # Mercurial >= 1.9   oldmatch = scmutil.match   except ImportError:   # Mercurial <= 1.8   oldmatch = cmdutil.match   manifest = repo[None].manifest()   def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):   match = oldmatch(repo, pats, opts, globbed, default)   m = copy.copy(match) - notbfile = lambda f: not bfutil.is_standin(f) and bfutil.standin(f) not in manifest + notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in manifest   m._files = [f for f in m._files if notbfile(f)]   m._fmap = set(m._files)   orig_matchfn = m.matchfn   m.matchfn = lambda f: notbfile(f) and orig_matchfn(f) or None   return m   try:   # Mercurial >= 1.9   scmutil.match = override_match   result = orig(ui, repo, *pats, **opts)   scmutil.match = oldmatch   except ImportError:   # Mercurial <= 1.8   cmdutil.match = override_match   result = orig(ui, repo, *pats, **opts)   cmdutil.match = oldmatch     return (result is 1 or bad) and 1 or 0    def override_remove(orig, ui, repo, *pats, **opts):   wctx = repo[None].manifest()   try:   # Mercurial >= 1.9   oldmatch = scmutil.match   except ImportError:   # Mercurial <= 1.8   oldmatch = cmdutil.match   def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):   match = oldmatch(repo, pats, opts, globbed, default)   m = copy.copy(match) - notbfile = lambda f: not bfutil.is_standin(f) and bfutil.standin(f) not in wctx + notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in wctx   m._files = [f for f in m._files if notbfile(f)]   m._fmap = set(m._files)   orig_matchfn = m.matchfn   m.matchfn = lambda f: orig_matchfn(f) and notbfile(f)   return m   try:   # Mercurial >= 1.9   scmutil.match = override_match   orig(ui, repo, *pats, **opts)   scmutil.match = oldmatch   except ImportError:   # Mercurial <= 1.8   cmdutil.match = override_match   orig(ui, repo, *pats, **opts)   cmdutil.match = oldmatch     after, force = opts.get('after'), opts.get('force')   if not pats and not after:   raise util.Abort(_('no files specified'))   try:   # Mercurial >= 1.9   m = scmutil.match(repo[None], pats, opts)   except ImportError:   # Mercurial <= 1.8   m = cmdutil.match(repo, pats, opts)   try:   repo.bfstatus = True   s = repo.status(match=m, clean=True)   finally:   
repo.bfstatus = False   modified, added, deleted, clean = [[f for f in list if bfutil.standin(f) in wctx] for list in [s[0], s[1], s[3], s[6]]]     def warn(files, reason):   for f in files:   ui.warn(_('not removing %s: file %s (use -f to force removal)\n')   % (m.rel(f), reason))     if force:   remove, forget = modified + deleted + clean, added   elif after:   remove, forget = deleted, []   warn(modified + added + clean, _('still exists'))   else:   remove, forget = deleted + clean, []   warn(modified, _('is modified'))   warn(added, _('has been marked for add'))     for f in sorted(remove + forget):   if ui.verbose or not m.exact(f):   ui.status(_('removing %s\n') % m.rel(f))     # Need to lock because standin files are deleted then removed from the repository   # and we could race inbetween.   wlock = repo.wlock()   try: - bfdirstate = bfutil.open_bfdirstate(ui, repo) + bfdirstate = bfutil.openbfdirstate(ui, repo)   for f in remove:   if not after:   os.unlink(repo.wjoin(f))   currentdir = os.path.split(f)[0]   while currentdir and not os.listdir(repo.wjoin(currentdir)):   os.rmdir(repo.wjoin(currentdir))   currentdir = os.path.split(currentdir)[0]   bfdirstate.remove(bfutil.unixpath(f))   bfdirstate.write()     forget = [bfutil.standin(f) for f in forget]   remove = [bfutil.standin(f) for f in remove]   bfutil.repo_forget(repo, forget)   bfutil.repo_remove(repo, remove, unlink=True)   finally:   wlock.release()    def override_status(orig, ui, repo, *pats, **opts):   try:   repo.bfstatus = True   return orig(ui, repo, *pats, **opts)   finally:   repo.bfstatus = False    def override_log(orig, ui, repo, *pats, **opts):   try:   repo.bfstatus = True   orig(ui, repo, *pats, **opts)   finally:   repo.bfstatus = False    def override_verify(orig, ui, repo, *pats, **opts):   bf = opts.pop('bf', False)   all = opts.pop('bfa', False)   contents = opts.pop('bfc', False)     result = orig(ui, repo, *pats, **opts)   if bf: - result = result or bfcommands.verify_bfiles(ui, repo, all, contents) + result = result or bfcommands.verifybfiles(ui, repo, all, contents)   return result    # Override needs to refresh standins so that update's normal merge  # will go through properly. Then the other update hook (overriding repo.update)  # will get the new files. Filemerge is also overriden so that the merge  # will merge standins correctly.  def override_update(orig, ui, repo, *pats, **opts): - bfdirstate = bfutil.open_bfdirstate(ui, repo) + bfdirstate = bfutil.openbfdirstate(ui, repo)   s = bfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False, False, False)   (unsure, modified, added, removed, missing, unknown, ignored, clean) = s     # Need to lock between the standins getting updated and their bfiles getting updated   wlock = repo.wlock()   try:   if opts['check']:   mod = len(modified) > 0   for bfile in unsure:   standin = bfutil.standin(bfile)   if repo['.'][standin].data().strip() != bfutil.hashfile(repo.wjoin(bfile)):   mod = True   else:   bfdirstate.normal(bfutil.unixpath(bfile))   bfdirstate.write()   if mod:   raise util.Abort(_('uncommitted local changes'))   # XXX handle removed differently   if not opts['clean']:   for bfile in unsure + modified + added: - bfutil.update_standin(repo, bfutil.standin(bfile)) + bfutil.updatestandin(repo, bfutil.standin(bfile))   finally:   wlock.release()   return orig(ui, repo, *pats, **opts)    # Override filemerge to prompt the user about how they wish to merge bfiles.  
# This will handle identical edits, and copy/rename + edit without prompting the user.  def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):   # Use better variable names here. Because this is a wrapper we cannot change   # the variable names in the function declaration.   fcdest, fcother, fcancestor = fcd, fco, fca - if not bfutil.is_standin(orig): + if not bfutil.isstandin(orig):   return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)   else:   if not fcother.cmp(fcdest): # files identical?   return None     if fcancestor == fcother: # backwards, use working dir parent as ancestor   fcancestor = fcdest.parents()[0]     if orig != fcother.path():   repo.ui.status(_('merging %s and %s to %s\n') - % (bfutil.split_standin(orig), bfutil.split_standin(fcother.path()), bfutil.split_standin(fcdest.path()))) + % (bfutil.splitstandin(orig), bfutil.splitstandin(fcother.path()), bfutil.splitstandin(fcdest.path())))   else: - repo.ui.status(_('merging %s\n') % bfutil.split_standin(fcdest.path())) + repo.ui.status(_('merging %s\n') % bfutil.splitstandin(fcdest.path()))     if fcancestor.path() != fcother.path() and fcother.data() == fcancestor.data():   return 0   if fcancestor.path() != fcdest.path() and fcdest.data() == fcancestor.data():   repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())   return 0     if repo.ui.promptchoice(_('bfile %s has a merge conflict\n' - 'keep (l)ocal or take (o)ther?') % bfutil.split_standin(orig), + 'keep (l)ocal or take (o)ther?') % bfutil.splitstandin(orig),   (_('&Local'), _('&Other')), 0) == 0:   return 0   else:   repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())   return 0    # Copy first changes the matchers to match standins instead of bfiles.  # Then it overrides util.copyfile in that function it checks if the destination  # bfile already exists. It also keeps a list of copied files so that the bfiles  # can be copied and the dirstate updated.  
def override_copy(orig, ui, repo, pats, opts, rename=False):   # doesn't remove bfile on rename   if len(pats) < 2:   # this isn't legal, let the original function deal with it   return orig(ui, repo, pats, opts, rename)     def makestandin(relpath):   try:   # Mercurial >= 1.9   path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)   except ImportError:   # Mercurial <= 1.8   path = util.canonpath(repo.root, repo.getcwd(), relpath)   return os.path.join(os.path.relpath('.', repo.getcwd()), bfutil.standin(path))     try:   # Mercurial >= 1.9   fullpats = scmutil.expandpats(pats)   except ImportError:   # Mercurial <= 1.8   fullpats = cmdutil.expandpats(pats)   dest = fullpats[-1]     if os.path.isdir(dest):   if not os.path.isdir(makestandin(dest)):   os.makedirs(makestandin(dest))   # This could copy both bfiles and normal files in one command, but we don't want   # to do that first replace their matcher to only match normal files and run it   # then replace it to just match bfiles and run it again   nonormalfiles = False   nobfiles = False   try:   # Mercurial >= 1.9   oldmatch = scmutil.match   except ImportError:   # Mercurial <= 1.8   oldmatch = cmdutil.match   try:   manifest = repo[None].manifest()   def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):   match = oldmatch(repo, pats, opts, globbed, default)   m = copy.copy(match) - notbfile = lambda f: not bfutil.is_standin(f) and bfutil.standin(f) not in manifest + notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in manifest   m._files = [f for f in m._files if notbfile(f)]   m._fmap = set(m._files)   orig_matchfn = m.matchfn   m.matchfn = lambda f: notbfile(f) and orig_matchfn(f) or None   return m   try:   # Mercurial >= 1.9   scmutil.match = override_match   except ImportError:   # Mercurial <= 1.8   cmdutil.match = override_match   result = orig(ui, repo, pats, opts, rename)   except util.Abort as e:   if str(e) != 'no files to copy':   raise e   else:   nonormalfiles = True   result = 0   finally:   try:   # Mercurial >= 1.9   scmutil.match = oldmatch   except ImportError:   # Mercurial <= 1.8   cmdutil.match = oldmatch     # The first rename can cause our current working directory to be removed. In that case   # there is nothing left to copy/rename so just quit.   try:   repo.getcwd()   except OSError:   return result     try:   # When we call orig below it creates the standins but we don't add them to the dir state   # until later so lock during that time.   
wlock = repo.wlock()     manifest = repo[None].manifest()   def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):   newpats = []   # The patterns were previously mangled to add .hgbfiles, we need to remove that now   for pat in pats: - if match_.patkind(pat) == None and bfutil.short_name in pat: - newpats.append(pat.replace( bfutil.short_name, '')) + if match_.patkind(pat) == None and bfutil.shortname in pat: + newpats.append(pat.replace(bfutil.shortname, ''))   else:   newpats.append(pat)   match = oldmatch(repo, newpats, opts, globbed, default)   m = copy.copy(match)   bfile = lambda f: bfutil.standin(f) in manifest   m._files = [bfutil.standin(f) for f in m._files if bfile(f)]   m._fmap = set(m._files)   orig_matchfn = m.matchfn - m.matchfn = lambda f: bfutil.is_standin(f) and bfile(bfutil.split_standin(f)) and orig_matchfn(bfutil.split_standin(f)) or None + m.matchfn = lambda f: bfutil.isstandin(f) and bfile(bfutil.splitstandin(f)) and orig_matchfn(bfutil.splitstandin(f)) or None   return m   try:   # Mercurial >= 1.9   scmutil.match = override_match   except ImportError:   # Mercurial <= 1.9   cmdutil.match = override_match   listpats = []   for pat in pats:   if match_.patkind(pat) != None:   listpats.append(pat)   else:   listpats.append(makestandin(pat))     try:   origcopyfile = util.copyfile   copiedfiles = []   def override_copyfile(src, dest): - if bfutil.short_name in src and bfutil.short_name in dest: - destbfile = dest.replace(bfutil.short_name, '') + if bfutil.shortname in src and bfutil.shortname in dest: + destbfile = dest.replace(bfutil.shortname, '')   if not opts['force'] and os.path.exists(destbfile):   raise IOError('', _('destination bfile already exists'))   copiedfiles.append((src, dest))   origcopyfile(src, dest)     util.copyfile = override_copyfile   result += orig(ui, repo, listpats, opts, rename)   finally:   util.copyfile = origcopyfile   - bfdirstate = bfutil.open_bfdirstate(ui, repo) + bfdirstate = bfutil.openbfdirstate(ui, repo)   for (src, dest) in copiedfiles: - if bfutil.short_name in src and bfutil.short_name in dest: - srcbfile = src.replace(bfutil.short_name, '') - destbfile = dest.replace(bfutil.short_name, '') + if bfutil.shortname in src and bfutil.shortname in dest: + srcbfile = src.replace(bfutil.shortname, '') + destbfile = dest.replace(bfutil.shortname, '')   destbfiledir = os.path.dirname(destbfile) or '.'   if not os.path.isdir(destbfiledir):   os.makedirs(destbfiledir)   if rename:   os.rename(srcbfile, destbfile)   bfdirstate.remove(bfutil.unixpath(os.path.relpath(srcbfile, repo.root)))   else:   util.copyfile(srcbfile, destbfile)   bfdirstate.add(bfutil.unixpath(os.path.relpath(destbfile, repo.root)))   bfdirstate.write()   except util.Abort as e:   if str(e) != 'no files to copy':   raise e   else:   nobfiles = True   finally:   try:   # Mercurial >= 1.9   scmutil.match = oldmatch   except ImportError:   # Mercurial <= 1.8   cmdutil.match = oldmatch   wlock.release()     if nobfiles and nonormalfiles:   raise util.Abort(_('no files to copy'))     return result    # When the user calls revert, we have to be careful to not revert any changes to other  # bfiles accidentally. This means we have to keep track of the bfiles that are  # being reverted so we only pull down the necessary bfiles.  #  # Standins are only updated (to match the hash of bfiles) before commits.  # Update the standins then run the original revert (changing the matcher to hit standins  # instead of bfiles). 
Based on the resulting standins update the bfiles. Then return the  # standins to their proper state  def override_revert(orig, ui, repo, *pats, **opts):   # Because we put the standins in a bad state (by updating them) and then return them   # to a correct state we need to lock to prevent others from changing them in their   # incorrect state.   wlock = repo.wlock()   try: - bfdirstate = bfutil.open_bfdirstate(ui, repo) + bfdirstate = bfutil.openbfdirstate(ui, repo)   (modified, added, removed, missing, unknown, ignored, clean) = bfutil.bfdirstate_status(bfdirstate, repo, repo['.'].rev())   for bfile in modified: - bfutil.update_standin(repo, bfutil.standin(bfile)) + bfutil.updatestandin(repo, bfutil.standin(bfile))     try:   # Mercurial >= 1.9   oldmatch = scmutil.match   except ImportError:   # Mercurial <= 1.8   oldmatch = cmdutil.match   try:   ctx = repo[opts.get('rev')]   def override_match(ctxorrepo, pats=[], opts={}, globbed=False, default='relpath'):   if hasattr(ctxorrepo, 'match'):   ctx0 = ctxorrepo   else:   ctx0 = ctxorrepo[None]   match = oldmatch(ctxorrepo, pats, opts, globbed, default)   m = copy.copy(match)   def tostandin(f):   if bfutil.standin(f) in ctx0 or bfutil.standin(f) in ctx:   return bfutil.standin(f) + elif bfutil.standin(f) in repo[None]: + return None   return f   m._files = [tostandin(f) for f in m._files] + m._files = [f for f in m._files if f is not None]   m._fmap = set(m._files)   orig_matchfn = m.matchfn   def matchfn(f): - if bfutil.is_standin(f): + if bfutil.isstandin(f):   # We need to keep track of what bfiles are being matched so we know which   # ones to update later (otherwise we revert changes to other bfiles   # accidentally). This is repo specific, so duckpunch the repo object to   # keep the list of bfiles for us later. 
- if(orig_matchfn(bfutil.split_standin(f)) and (f in repo[None] or f in ctx)): - bfiles_list = getattr(repo, '_bfiles_to_update', []) - bfiles_list.append(bfutil.split_standin(f)) - repo._bfiles_to_update = bfiles_list; + if(orig_matchfn(bfutil.splitstandin(f)) and (f in repo[None] or f in ctx)): + bfileslist = getattr(repo, '_bfilestoupdate', []) + bfileslist.append(bfutil.splitstandin(f)) + repo._bfilestoupdate = bfileslist;   return True   else:   return False   return orig_matchfn(f)   m.matchfn = matchfn   return m   try:   # Mercurial >= 1.9   scmutil.match = override_match + matches = override_match(repo[None], pats, opts)   except ImportError:   # Mercurial <= 1.8   cmdutil.match = override_match + matches = override_match(repo, pats, opts)   orig(ui, repo, *pats, **opts)   finally:   try:   # Mercurial >= 1.9   scmutil.match = oldmatch   except ImportError:   # Mercurial <= 1.8   cmdutil.match = oldmatch - bfiles_list = getattr(repo, '_bfiles_to_update', []) - bfcommands.revert_bfiles(ui, repo, bfiles_list) + bfileslist = getattr(repo, '_bfilestoupdate', []) + bfcommands.revertbfiles(ui, repo, bfileslist)   # Empty out the bfiles list so we start fresh next time - repo._bfiles_to_update = [] + repo._bfilestoupdate = []   for bfile in modified: - if bfile in bfiles_list: + if bfile in bfileslist:   if os.path.exists(repo.wjoin(bfutil.standin(bfile))) and bfile in repo['.']: - bfutil.write_standin(repo, bfutil.standin(bfile), repo['.'][bfile].data().strip(), 'x' in repo['.'][bfile].flags()) + bfutil.writestandin(repo, bfutil.standin(bfile), repo['.'][bfile].data().strip(), 'x' in repo['.'][bfile].flags()) + bfdirstate = bfutil.openbfdirstate(ui, repo) + for bfile in added: + standin = bfutil.standin(bfile) + if standin not in ctx and (standin in matches or opts.get('all')): + if bfile in bfdirstate: + try: + # Mercurial >= 1.9 + bfdirstate.drop(bfile) + except AttributeError: + # Mercurial <= 1.8 + bfdirstate.forget(bfile) + util.unlinkpath(repo.wjoin(standin)) + bfdirstate.write()   finally:   wlock.release()    def hg_update(orig, repo, node):   result = orig(repo, node)   # XXX check if it worked first - bfcommands.update_bfiles(repo.ui, repo) + bfcommands.updatebfiles(repo.ui, repo)   return result    def hg_clean(orig, repo, node, show_stats=True):   result = orig(repo, node, show_stats) - bfcommands.update_bfiles(repo.ui, repo) + bfcommands.updatebfiles(repo.ui, repo)   return result    def hg_merge(orig, repo, node, force=None, remind=True):   result = orig(repo, node, force, remind) - bfcommands.update_bfiles(repo.ui, repo) + bfcommands.updatebfiles(repo.ui, repo)   return result    # When we rebase a repository with remotely changed bfiles, we need  # to take some extra care so that the bfiles are correctly updated  # in the working copy  def override_pull(orig, ui, repo, source="default", **opts):   if opts.get('rebase', False):   repo._isrebasing = True   try:   if opts.get('update'):   del opts['update']   ui.debug('--update and --rebase are not compatible, ignoring '   'the update flag\n')   del opts['rebase']   try:   # Mercurial >= 1.9   cmdutil.bailifchanged(repo)   except AttributeError:   # Mercurial <= 1.8   cmdutil.bail_if_changed(repo)   revsprepull = len(repo)   origpostincoming = commands.postincoming   def _dummy(*args, **kwargs):   pass   commands.postincoming = _dummy   try:   result = commands.pull(ui, repo, source, **opts)   finally:   commands.postincoming = origpostincoming   revspostpull = len(repo)   if revspostpull > revsprepull:   result = result or 
rebase.rebase(ui, repo)   branch = repo[None].branch()   dest = repo[branch].rev()   finally:   repo._isrebasing = False   else:   result = orig(ui, repo, source, **opts)   return result    def override_rebase(orig, ui, repo, **opts):   repo._isrebasing = True   try:   orig(ui, repo, **opts)   finally:   repo._isrebasing = False    def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,   prefix=None, mtime=None, subrepos=None):   # No need to lock because we are only reading history and bfile caches   # neither of which are modified     if kind not in archival.archivers:   raise util.Abort(_("unknown archive type '%s'") % kind)     ctx = repo[node]     # In Mercurial <= 1.5 the prefix is passed to the archiver so try that   # if that doesn't work we are probably in Mercurial >= 1.6 where the   # prefix is not handled by the archiver   try:   archiver = archival.archivers[kind](dest, prefix, mtime or ctx.date()[0])     def write(name, mode, islink, getdata):   if matchfn and not matchfn(name):   return   data = getdata()   if decode:   data = repo.wwritedata(name, data)   archiver.addfile(name, mode, islink, data)   except TypeError:   if kind == 'files':   if prefix:   raise util.Abort(_('cannot give prefix when archiving to files'))   else:   prefix = archival.tidyprefix(dest, kind, prefix)     def write(name, mode, islink, getdata):   if matchfn and not matchfn(name):   return   data = getdata()   if decode:   data = repo.wwritedata(name, data)   archiver.addfile(prefix + name, mode, islink, data)     archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])     if repo.ui.configbool("ui", "archivemeta", True):   def metadata():   base = 'repo: %s\nnode: %s\nbranch: %s\n' % (   hex(repo.changelog.node(0)), hex(node), ctx.branch())     tags = ''.join('tag: %s\n' % t for t in ctx.tags()   if repo.tagtype(t) == 'global')   if not tags:   repo.ui.pushbuffer()   opts = {'template': '{latesttag}\n{latesttagdistance}',   'style': '', 'patch': None, 'git': None}   cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)   ltags, dist = repo.ui.popbuffer().split('\n')   tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))   tags += 'latesttagdistance: %s\n' % dist     return base + tags     write('.hg_archival.txt', 0644, False, metadata)     for f in ctx:   ff = ctx.flags(f)   getdata = ctx[f].data - if bfutil.is_standin(f): - path = bfutil.find_file(repo, getdata().strip()) + if bfutil.isstandin(f): + path = bfutil.findfile(repo, getdata().strip())   ### TODO: What if the file is not cached? - f = bfutil.split_standin(f) + f = bfutil.splitstandin(f)     def getdatafn():   with open(path, 'rb') as fd:   return fd.read()     getdata = getdatafn   write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)     if subrepos:   for subpath in ctx.substate:   sub = ctx.sub(subpath)   try:   sub.archive(repo.ui, archiver, prefix)   except TypeError:   sub.archive(archiver, prefix)     archiver.done()    # If a bfile is modified the change is not reflected in its standin until a commit.  # cmdutil.bailifchanged raises an exception if the repo has uncommitted changes.  # Wrap it to also check if bfiles were changed. This is used by bisect and backout.  
def override_bailifchanged(orig, repo):   orig(repo)   repo.bfstatus = True   modified, added, removed, deleted = repo.status()[:4]   repo.bfstatus = False   if modified or added or removed or deleted:   raise util.Abort(_('outstanding uncommitted changes'))    # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check  def override_fetch(orig, ui, repo, *pats, **opts):   repo.bfstatus = True   modified, added, removed, deleted = repo.status()[:4]   repo.bfstatus = False   if modified or added or removed or deleted:   raise util.Abort(_('outstanding uncommitted changes'))   return orig(ui, repo, *pats, **opts)    def override_forget(orig, ui, repo, *pats, **opts):   wctx = repo[None].manifest()   try:   # Mercurial >= 1.9   oldmatch = scmutil.match   except ImportError:   # Mercurial <= 1.8   oldmatch = cmdutil.match   def override_match(repo, pats=[], opts={}, globbed=False, default='relpath'):   match = oldmatch(repo, pats, opts, globbed, default)   m = copy.copy(match) - notbfile = lambda f: not bfutil.is_standin(f) and bfutil.standin(f) not in wctx + notbfile = lambda f: not bfutil.isstandin(f) and bfutil.standin(f) not in wctx   m._files = [f for f in m._files if notbfile(f)]   m._fmap = set(m._files)   orig_matchfn = m.matchfn   m.matchfn = lambda f: orig_matchfn(f) and notbfile(f)   return m   try:   # Mercurial >= 1.9   scmutil.match = override_match   orig(ui, repo, *pats, **opts)   scmutil.match = oldmatch   m = scmutil.match(repo[None], pats, opts)   except ImportError:   # Mercurial <= 1.8   cmdutil.match = override_match   orig(ui, repo, *pats, **opts)   cmdutil.match = oldmatch   m = cmdutil.match(repo, pats, opts)     try:   repo.bfstatus = True   s = repo.status(match=m, clean=True)   finally:   repo.bfstatus = False   forget = sorted(s[0] + s[1] + s[3] + s[6])   forget = [f for f in forget if bfutil.standin(f) in wctx]     for f in forget:   if bfutil.standin(f) not in repo.dirstate and not os.path.isdir(m.rel(bfutil.standin(f))):   ui.warn(_('not removing %s: file is already untracked\n')   % m.rel(f))     for f in forget:   if ui.verbose or not m.exact(f):   ui.status(_('removing %s\n') % m.rel(f))     # Need to lock because standin files are deleted then removed from the repository   # and we could race inbetween.   
wlock = repo.wlock()   try: - bfdirstate = bfutil.open_bfdirstate(ui, repo) + bfdirstate = bfutil.openbfdirstate(ui, repo)   for f in forget:   bfdirstate.remove(bfutil.unixpath(f))   bfdirstate.write()   bfutil.repo_remove(repo, [bfutil.standin(f) for f in forget], unlink=True)   finally:   wlock.release()   -def get_outgoing_bfiles(ui, repo, dest=None, **opts): +def getoutgoingbfiles(ui, repo, dest=None, **opts):   dest = ui.expandpath(dest or 'default-push', dest or 'default')   dest, branches = hg.parseurl(dest, opts.get('branch'))   revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))   if revs:   revs = [repo.lookup(rev) for rev in revs]     # Mercurial <= 1.5 had remoteui in cmdutil, then it moved to hg   try:   remoteui = cmdutil.remoteui   except AttributeError:   remoteui = hg.remoteui     try:   remote = hg.repository(remoteui(repo, opts), dest)   except error.RepoError:   return None   o = bfutil.findoutgoing(repo, remote, False)   if not o:   return None   o = repo.changelog.nodesbetween(o, revs)[0]   if opts.get('newest_first'):   o.reverse()     toupload = set()   for n in o:   parents = [p for p in repo.changelog.parents(n) if p != node.nullid]   ctx = repo[n]   files = set(ctx.files())   if len(parents) == 2:   mc = ctx.manifest()   mp1 = ctx.parents()[0].manifest()   mp2 = ctx.parents()[1].manifest()   for f in mp1:   if f not in mc:   files.add(f)   for f in mp2:   if f not in mc:   files.add(f)   for f in mc:   if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):   files.add(f) - toupload = toupload.union(set([f for f in files if bfutil.is_standin(f) and f in ctx])) + toupload = toupload.union(set([f for f in files if bfutil.isstandin(f) and f in ctx]))   return toupload    def override_outgoing(orig, ui, repo, dest=None, **opts):   orig(ui, repo, dest, **opts)     if opts.pop('bf', None): - toupload = get_outgoing_bfiles(ui, repo, dest, **opts) + toupload = getoutgoingbfiles(ui, repo, dest, **opts)   if toupload is None:   ui.status(_('kbfiles: No remote repo\n'))   else:   ui.status(_('kbfiles to upload:\n'))   for file in toupload: - ui.status(bfutil.split_standin(file) + '\n') + ui.status(bfutil.splitstandin(file) + '\n')   ui.status('\n')    def override_summary(orig, ui, repo, *pats, **opts):   orig(ui, repo, *pats, **opts)     if opts.pop('bf', None): - toupload = get_outgoing_bfiles(ui, repo, None, **opts) + toupload = getoutgoingbfiles(ui, repo, None, **opts)   if toupload is None:   ui.status(_('kbfiles: No remote repo\n'))   else:   ui.status(_('kbfiles: %d to upload\n') % len(toupload))    def override_addremove(orig, ui, repo, *pats, **opts):   # Check if the parent or child has bfiles if they do don't allow it.   # If there is a symlink in the manifest then getting the manifest throws an exception   # catch it and let addremove deal with it. This happens in Mercurial's test   # test-addremove-symlink   try:   manifesttip = set(repo['tip'].manifest())   except util.Abort:   manifesttip = set()   try:   manifestworking = set(repo[None].manifest())   except util.Abort:   manifestworking = set()     # Manifests are only iterable so turn them into sets then union   for file in manifesttip.union(manifestworking): - if file.startswith(bfutil.short_name): + if file.startswith(bfutil.shortname):   raise util.Abort(_('addremove cannot be run on a repo with bfiles'))     return orig(ui, repo, *pats, **opts)    # Calling purge with --all will cause the kbfiles to be deleted.  # Override repo.status to prevent this from happening.  
def override_purge(orig, ui, repo, *dirs, **opts):   oldstatus = repo.status   def override_status(node1='.', node2=None, match=None, ignored=False,   clean=False, unknown=False, listsubrepos=False):   r = oldstatus(node1, node2, match, ignored, clean, unknown,   listsubrepos) - bfdirstate = bfutil.open_bfdirstate(ui, repo) + bfdirstate = bfutil.openbfdirstate(ui, repo)   modified, added, removed, deleted, unknown, ignored, clean = r   unknown = [f for f in unknown if bfdirstate[f] == '?']   ignored = [f for f in ignored if bfdirstate[f] == '?']   return modified, added, removed, deleted, unknown, ignored, clean   repo.status = override_status   orig(ui, repo, *dirs, **opts)   repo.status = oldstatus   +def override_rollback(orig, ui, repo, **opts): + result = orig(ui, repo, **opts) + merge.update(repo, node=None, branchmerge=False, force=True, partial=bfutil.isstandin) + bfdirstate = bfutil.openbfdirstate(ui, repo) + bfiles = bfutil.listbfiles(repo) + oldbfiles = bfutil.listbfiles(repo, repo[None].parents()[0].rev()) + for file in bfiles: + if file in oldbfiles: + bfdirstate.normallookup(file) + else: + bfdirstate.add(file) + bfdirstate.write() + return result +  def uisetup(ui):   # Disable auto-status for some commands which assume that all   # files in the result are under Mercurial's control     entry = extensions.wrapcommand(commands.table, 'add', override_add)   addopt = [('', 'bf', None, _('add as bfile')),   ('', 'bfsize', '', _('add all files above this size (in megabytes) as bfiles (default: 10)'))]   entry[1].extend(addopt)     entry = extensions.wrapcommand(commands.table, 'addremove', override_addremove)   entry = extensions.wrapcommand(commands.table, 'remove', override_remove)   entry = extensions.wrapcommand(commands.table, 'forget', override_forget)   entry = extensions.wrapcommand(commands.table, 'status', override_status)   entry = extensions.wrapcommand(commands.table, 'log', override_log) + entry = extensions.wrapcommand(commands.table, 'rollback', override_rollback)     entry = extensions.wrapcommand(commands.table, 'verify', override_verify)   verifyopt = [('', 'bf', None, _('verify bfiles')),   ('', 'bfa', None, _('verify all revisions of bfiles not just current')),   ('', 'bfc', None, _('verify bfile contents not just existence'))]   entry[1].extend(verifyopt)     entry = extensions.wrapcommand(commands.table, 'outgoing', override_outgoing)   outgoingopt = [('', 'bf', None, _('display outgoing bfiles'))]   entry[1].extend(outgoingopt)   entry = extensions.wrapcommand(commands.table, 'summary', override_summary)   summaryopt = [('', 'bf', None, _('display outgoing bfiles'))]   entry[1].extend(summaryopt)     entry = extensions.wrapcommand(commands.table, 'update', override_update)   entry = extensions.wrapcommand(commands.table, 'pull', override_pull)   entry = extensions.wrapfunction(filemerge, 'filemerge', override_filemerge)   entry = extensions.wrapfunction(cmdutil, 'copy', override_copy)     # Backout calls revert so we need to override both the command and the function   entry = extensions.wrapcommand(commands.table, 'revert', override_revert)   entry = extensions.wrapfunction(commands, 'revert', override_revert)     # clone uses hg._update instead of hg.update even though they are the   # same function... 
so wrap both of them)   extensions.wrapfunction(hg, 'update', hg_update)   extensions.wrapfunction(hg, '_update', hg_update)   extensions.wrapfunction(hg, 'clean', hg_clean)   extensions.wrapfunction(hg, 'merge', hg_merge)     extensions.wrapfunction(archival, 'archive', override_archive)   if hasattr(cmdutil, 'bailifchanged'):   extensions.wrapfunction(cmdutil, 'bailifchanged', override_bailifchanged)   else:   extensions.wrapfunction(cmdutil, 'bail_if_changed', override_bailifchanged)     for name, module in extensions.extensions():   if name == 'fetch':   extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch', override_fetch)   if name == 'purge':   extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge', override_purge)   if name == 'rebase':   extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase', override_rebase) - -
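The uisetup block above leans entirely on Mercurial's extensions.wrapcommand and extensions.wrapfunction hooks, which swap in a wrapper that receives the original callable as its first argument. A minimal sketch of that pattern, assuming a Mercurial 1.8/1.9-era API and a hypothetical override; it is not part of this changeset:

from mercurial import extensions, commands

def override_status(orig, ui, repo, *pats, **opts):
    # Extension-specific work can happen before or after delegating to
    # the wrapped command; here we only emit a debug note.
    ui.note('kbfiles: wrapped status invoked\n')
    return orig(ui, repo, *pats, **opts)

def uisetup(ui):
    # wrapcommand returns the command-table entry, so extra command-line
    # options can be appended to entry[1], as the real uisetup does above.
    entry = extensions.wrapcommand(commands.table, 'status', override_status)
    entry[1].append(('', 'bf', None, 'example flag added by the wrapper'))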
 
@@ -15,8 +15,8 @@
 except ImportError:   pass   -short_name = '.kbf' -long_name = 'kilnbfiles' +shortname = '.kbf' +longname = 'kilnbfiles'      # -- Portability wrappers ---------------------------------------------- @@ -124,37 +124,37 @@
  shutil.copyfile(src, dest)   os.chmod(dest, os.stat(src).st_mode)   -def system_cache_path(ui, hash): - path = ui.config(long_name, 'systemcache', None) +def systemcachepath(ui, hash): + path = ui.config(longname, 'systemcache', None)   if path:   path = os.path.join(path, hash)   else:   if os.name == 'nt': - path = os.path.join(os.getenv('LOCALAPPDATA') or os.getenv('APPDATA'), long_name, hash) + path = os.path.join(os.getenv('LOCALAPPDATA') or os.getenv('APPDATA'), longname, hash)   elif os.name == 'posix': - path = os.path.join(os.getenv('HOME'), '.' + long_name, hash) + path = os.path.join(os.getenv('HOME'), '.' + longname, hash)   else:   raise util.Abort(_('Unknown operating system: %s\n') % os.name)   return path   -def in_system_cache(ui, hash): - return os.path.exists(system_cache_path(ui, hash)) +def insystemcache(ui, hash): + return os.path.exists(systemcachepath(ui, hash))   -def find_file(repo, hash): - if in_cache(repo, hash): +def findfile(repo, hash): + if incache(repo, hash):   repo.ui.note(_('Found %s in cache\n') % hash) - return cache_path(repo, hash) - if in_system_cache(repo.ui, hash): + return cachepath(repo, hash) + if insystemcache(repo.ui, hash):   repo.ui.note(_('Found %s in system cache\n') % hash) - return system_cache_path(repo.ui, hash) + return systemcachepath(repo.ui, hash)   return None   -def open_bfdirstate(ui, repo): +def openbfdirstate(ui, repo):   '''   Return a dirstate object that tracks big files: i.e. its root is the   repo root, but it is saved in .hg/bfiles/dirstate.   ''' - admin = repo.join(long_name) + admin = repo.join(longname)   try:   # Mercurial >= 1.9   opener = scmutil.opener(admin) @@ -177,10 +177,10 @@
  # .hg/bfiles/{pending,committed}).   if not os.path.exists(os.path.join(admin, 'dirstate')):   util.makedirs(admin) - matcher = get_standin_matcher(repo) + matcher = getstandinmatcher(repo)   for standin in dirstate_walk(repo.dirstate, matcher): - bigfile = split_standin(standin) - hash = read_standin(repo, standin) + bigfile = splitstandin(standin) + hash = readstandin(repo, standin)   try:   curhash = hashfile(bigfile)   except IOError, err: @@ -215,50 +215,50 @@
  wlock.release()   return (modified, added, removed, missing, unknown, ignored, clean)   -def list_bfiles(repo, rev=None, matcher=None): +def listbfiles(repo, rev=None, matcher=None):   '''list big files in the working copy or specified changeset'''     if matcher is None: - matcher = get_standin_matcher(repo) + matcher = getstandinmatcher(repo)     bfiles = [] - if rev: + if rev is not None:   cctx = repo[rev]   for standin in cctx.walk(matcher): - filename = split_standin(standin) + filename = splitstandin(standin)   bfiles.append(filename)   else:   for standin in sorted(dirstate_walk(repo.dirstate, matcher)): - filename = split_standin(standin) + filename = splitstandin(standin)   bfiles.append(filename)   return bfiles   -def in_cache(repo, hash): - return os.path.exists(cache_path(repo, hash)) +def incache(repo, hash): + return os.path.exists(cachepath(repo, hash))   -def create_dir(dir): +def createdir(dir):   if not os.path.exists(dir):   os.makedirs(dir)   -def cache_path(repo, hash): - return repo.join(os.path.join(long_name, hash)) +def cachepath(repo, hash): + return repo.join(os.path.join(longname, hash))   -def copy_to_cache(repo, rev, file, uploaded=False): - hash = read_standin(repo, standin(file)) - if in_cache(repo, hash): +def copytocache(repo, rev, file, uploaded=False): + hash = readstandin(repo, standin(file)) + if incache(repo, hash):   return - create_dir(os.path.dirname(cache_path(repo, hash))) - if in_system_cache(repo.ui, hash): - link(system_cache_path(repo.ui, hash), cache_path(repo, hash)) + createdir(os.path.dirname(cachepath(repo, hash))) + if insystemcache(repo.ui, hash): + link(systemcachepath(repo.ui, hash), cachepath(repo, hash))   else: - shutil.copyfile(repo.wjoin(file), cache_path(repo, hash)) - os.chmod(cache_path(repo, hash), os.stat(repo.wjoin(file)).st_mode) - create_dir(os.path.dirname(system_cache_path(repo.ui, hash))) - link(cache_path(repo, hash), system_cache_path(repo.ui, hash)) + shutil.copyfile(repo.wjoin(file), cachepath(repo, hash)) + os.chmod(cachepath(repo, hash), os.stat(repo.wjoin(file)).st_mode) + createdir(os.path.dirname(systemcachepath(repo.ui, hash))) + link(cachepath(repo, hash), systemcachepath(repo.ui, hash))   -def get_standin_matcher(repo, pats=[], opts={}): +def getstandinmatcher(repo, pats=[], opts={}):   '''Return a match object that applies pats to <repo>/.kbf.''' - standin_dir = repo.pathto(short_name) + standindir = repo.pathto(shortname)   if pats:   # patterns supplied: search .hgbfiles relative to current dir   cwd = repo.getcwd() @@ -266,19 +266,19 @@
  # cwd is an absolute path for hg -R <reponame>   # work relative to the repository root in this case   cwd = '' - pats = [os.path.join(standin_dir, cwd, pat) for pat in pats] - elif os.path.isdir(standin_dir): + pats = [os.path.join(standindir, cwd, pat) for pat in pats] + elif os.path.isdir(standindir):   # no patterns: relative to repo root - pats = [standin_dir] + pats = [standindir]   else:   # no patterns and no .hgbfiles dir: return matcher that matches nothing   match = match_.match(repo.root, None, [], exact=True)   match.matchfn = lambda f: False   return match - return get_matcher(repo, pats, opts, showbad=False) + return getmatcher(repo, pats, opts, showbad=False)   -def get_matcher(repo, pats=[], opts={}, showbad=True): - '''Wrapper around cmdutil.match() that adds showbad: if false, neuter +def getmatcher(repo, pats=[], opts={}, showbad=True): + '''Wrapper around scmutil.match() that adds showbad: if false, neuter   the match object\'s bad() method so it does not print any warnings   about missing files or directories.'''   try: @@ -292,14 +292,14 @@
  match.bad = lambda f, msg: None   return match   -def compose_standin_matcher(repo, rmatcher): +def composestandinmatcher(repo, rmatcher):   '''Return a matcher that accepts standins corresponding to the files   accepted by rmatcher. Pass the list of files in the matcher as the   paths specified by the user.''' - smatcher = get_standin_matcher(repo, rmatcher.files()) + smatcher = getstandinmatcher(repo, rmatcher.files())   isstandin = smatcher.matchfn   def composed_matchfn(f): - return isstandin(f) and rmatcher.matchfn(split_standin(f)) + return isstandin(f) and rmatcher.matchfn(splitstandin(f))   smatcher.matchfn = composed_matchfn     return smatcher @@ -314,39 +314,39 @@
  # 2) Join with '/' because that's what dirstate always uses, even on   # Windows. Change existing separator to '/' first in case we are   # passed filenames from an external source (like the command line). - return short_name + '/' + filename.replace(os.sep, '/') + return shortname + '/' + filename.replace(os.sep, '/')   -def is_standin(filename): +def isstandin(filename):   '''Return true if filename is a big file standin. filename must   be in Mercurial\'s internal form (slash-separated).''' - return filename.startswith(short_name+'/') + return filename.startswith(shortname + '/')   -def split_standin(filename): +def splitstandin(filename):   # Split on / because that's what dirstate always uses, even on Windows.   # Change local separator to / first just in case we are passed filenames   # from an external source (like the command line).   bits = filename.replace(os.sep, '/').split('/', 1) - if len(bits) == 2 and bits[0] == short_name: + if len(bits) == 2 and bits[0] == shortname:   return bits[1]   else:   return None   -def update_standin(repo, standin): - file = repo.wjoin(split_standin(standin)) +def updatestandin(repo, standin): + file = repo.wjoin(splitstandin(standin))   if os.path.exists(file):   hash = hashfile(file) - executable = get_executable(file) - write_standin(repo, standin, hash, executable) + executable = getexecutable(file) + writestandin(repo, standin, hash, executable)   -def read_standin(repo, standin): +def readstandin(repo, standin):   '''read hex hash from <repo.root>/<standin>''' - return read_hash(repo.wjoin(standin)) + return readhash(repo.wjoin(standin))   -def write_standin(repo, standin, hash, executable): +def writestandin(repo, standin, hash, executable):   '''write hhash to <repo.root>/<standin>''' - write_hash(hash, repo.wjoin(standin), executable) + writehash(hash, repo.wjoin(standin), executable)   -def copy_and_hash(instream, outfile): +def copyandhash(instream, outfile):   '''Read bytes from instream (iterable) and write them to outfile,   computing the SHA-1 hash of the data along the way. Close outfile   when done and return the binary hash.''' @@ -384,7 +384,7 @@
  # Same blecch as above.   infile.close()   -def read_hash(filename): +def readhash(filename):   rfile = open(filename, 'rb')   hash = rfile.read(40)   rfile.close() @@ -393,14 +393,14 @@
  % (filename, len(hash)))   return hash   -def write_hash(hash, filename, executable): +def writehash(hash, filename, executable):   util.makedirs(os.path.dirname(filename))   if os.path.exists(filename):   os.unlink(filename)   if os.name == 'posix':   # Yuck: on Unix, go through open(2) to ensure that the caller's mode is   # filtered by umask() in the kernel, where it's supposed to be done. - wfile = os.fdopen(os.open(filename, os.O_WRONLY|os.O_CREAT, get_mode(executable)), 'wb') + wfile = os.fdopen(os.open(filename, os.O_WRONLY|os.O_CREAT, getmode(executable)), 'wb')   else:   # But on Windows, use open() directly, since passing mode='wb' to os.fdopen()   # does not work. (Python bug?) @@ -412,11 +412,11 @@
  finally:   wfile.close()   -def get_executable(filename): +def getexecutable(filename):   mode = os.stat(filename).st_mode   return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & stat.S_IXOTH)   -def get_mode(executable): +def getmode(executable):   if executable:   return 0755   else:
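The renamed bfutil helpers above all revolve around standins: tiny files under .kbf/ that record the 40-character SHA-1 hex digest of the real content, terminated by a newline. A rough sketch of that round trip under those assumptions; hashbigfile, writestandin_sketch, and the paths are illustrative, not the extension's own API:

import hashlib
import os

def hashbigfile(path):
    # Hex SHA-1 of the big file's contents, read in chunks.
    hasher = hashlib.sha1()
    f = open(path, 'rb')
    try:
        chunk = f.read(128 * 1024)
        while chunk:
            hasher.update(chunk)
            chunk = f.read(128 * 1024)
    finally:
        f.close()
    return hasher.hexdigest()

def writestandin_sketch(repo_root, filename):
    # The standin lives at <repo>/.kbf/<filename> and stores only the hash.
    standin = os.path.join(repo_root, '.kbf', filename)
    standindir = os.path.dirname(standin)
    if not os.path.isdir(standindir):
        os.makedirs(standindir)
    f = open(standin, 'wb')
    try:
        f.write(hashbigfile(os.path.join(repo_root, filename)) + '\n')
    finally:
        f.close()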
 
@@ -89,7 +89,7 @@
  # all fail too.   reason = err[0][1] # assumes err[0] is a socket.error   raise util.Abort('%s: %s' % (baseurl, reason)) - return bfutil.copy_and_hash(bfutil.blockstream(infile), tmpfile) + return bfutil.copyandhash(bfutil.blockstream(infile), tmpfile)     def _verify(self, hash):   try: @@ -117,7 +117,7 @@
  except AttributeError:   # Mercurial <= 1.8   baseurl, authinfo = url_.getauthinfo(self.url) - filename = bfutil.split_standin(standin) + filename = bfutil.splitstandin(standin)   if not filename:   return False   fctx = cctx[standin]
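The httpstore change above switches to bfutil.copyandhash, whose documented job is to stream byte blocks into an output file while computing their SHA-1 in a single pass. A plain-Python sketch of that behaviour (the name copyandhash_sketch is illustrative):

import hashlib

def copyandhash_sketch(instream, outfile):
    # Write each block to outfile and feed the same bytes to SHA-1, so the
    # downloaded data is verified without re-reading it from disk.
    hasher = hashlib.sha1()
    try:
        for block in instream:
            outfile.write(block)
            hasher.update(block)
    finally:
        outfile.close()
    return hasher.digest()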
 
@@ -11,7 +11,7 @@
  Since the cache is updated elsewhere, we can just read from it here as if it were the store.'''     def __init__(self, ui, repo, url): - url = os.path.join(url, '.hg', bfutil.long_name) + url = os.path.join(url, '.hg', bfutil.longname)   super(localstore, self).__init__(ui, repo, util.expandpath(url))     def put(self, source, filename, hash): @@ -19,15 +19,15 @@
  return     def exists(self, hash): - return bfutil.in_system_cache(self.repo.ui, hash) + return bfutil.insystemcache(self.repo.ui, hash)     def _getfile(self, tmpfile, filename, hash): - if bfutil.in_system_cache(self.ui, hash): - return bfutil.system_cache_path(self.ui, hash) + if bfutil.insystemcache(self.ui, hash): + return bfutil.systemcachepath(self.ui, hash)   raise basestore.StoreError(filename, hash, '', _("Can't get file locally"))     def _verifyfile(self, cctx, cset, contents, standin, verified): - filename = bfutil.split_standin(standin) + filename = bfutil.splitstandin(standin)   if not filename:   return False   fctx = cctx[standin] @@ -35,25 +35,24 @@
  if key in verified:   return False   - expect_hash = fctx.data()[0:40] + expecthash = fctx.data()[0:40]   verified.add(key) - if not bfutil.in_system_cache(self.ui, expect_hash): + if not bfutil.insystemcache(self.ui, expecthash):   self.ui.warn(   _('changeset %s: %s missing\n'   ' (%s: %s)\n') - % (cset, filename, expect_hash, err.strerror)) + % (cset, filename, expecthash, err.strerror))   return True # failed     if contents: - store_path = bfutil.system_cache_path(self.ui, expect_hash) - actual_hash = bfutil.hashfile(store_path) - if actual_hash != expect_hash: + storepath = bfutil.systemcachepath(self.ui, expecthash) + actualhash = bfutil.hashfile(storepath) + if actualhash != expecthash:   self.ui.warn(   _('changeset %s: %s: contents differ\n' - ' (%s:\n' - ' expected hash %s,\n' - ' but got %s)\n') - % (cset, filename, - store_path, expect_hash, actual_hash)) + ' (%s:\n' + ' expected hash %s,\n' + ' but got %s)\n') + % (cset, filename, storepath, expecthash, actualhash))   return True # failed   return False
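In _verifyfile above, the expected hash comes from the first 40 bytes of the standin tracked in the changeset, and the check is whether the system cache holds a file whose contents hash back to that value. A simplified standalone sketch of the same comparison (cachedir and the return values are illustrative):

import hashlib
import os

def verifycached_sketch(cachedir, expecthash):
    # The cache stores each big file under its own SHA-1, so both the
    # existence check and the content check are keyed by expecthash.
    storepath = os.path.join(cachedir, expecthash)
    if not os.path.exists(storepath):
        return 'missing'
    f = open(storepath, 'rb')
    try:
        actualhash = hashlib.sha1(f.read()).hexdigest()
    finally:
        f.close()
    if actualhash != expecthash:
        return 'contents differ'
    return 'ok'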
 
@@ -30,8 +30,9 @@
  stderr=subprocess.PIPE,   universal_newlines=True)   stdout, stderr = child.communicate() - versions = re.findall(r'\d+\.\d+\.\d+', stdout) + versions = re.findall(r'\d+\.\d+(?:\.\d+)?', stdout)   parts = [re.match(r'\d+', v).group(0) for v in versions[0].split('.')] +   version = [0, 0, 0]   for i, part in enumerate(map(int, parts)):   version[i] = part
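The regex tweak above lets the Mercurial version sniffing accept two-component strings such as '1.9' as well as three-component ones such as '1.8.4', padding missing parts with zero. The updated logic, pulled out into a small function with illustrative sample inputs:

import re

def parseversion(stdout):
    # Accept 'X.Y' or 'X.Y.Z'; components that are absent default to 0.
    versions = re.findall(r'\d+\.\d+(?:\.\d+)?', stdout)
    parts = [re.match(r'\d+', v).group(0) for v in versions[0].split('.')]
    version = [0, 0, 0]
    for i, part in enumerate(map(int, parts)):
        version[i] = part
    return version

# parseversion('Mercurial Distributed SCM (version 1.9)')   -> [1, 9, 0]
# parseversion('Mercurial Distributed SCM (version 1.8.4)') -> [1, 8, 4]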
 
@@ -152,10 +152,7 @@
 os.unlink('n1')  os.unlink('n2.txt')  os.unlink('dir/b3') -os.unlink('.kbf/dir/b3')  os.unlink('.kbf/b1.orig') -os.unlink('.kbf/b2.txt') -os.unlink('.kbf/dir/dir/b4.txt')    hgt.announce('revert specific files')  hgt.hg(['revert', '-r', '1', 'glob:**.txt'], @@ -231,6 +228,7 @@
 # second bfile  os.chdir('..')  os.mkdir('repo3') +os.chdir('repo3')  hgt.hg(['init', '-q'])  hgt.writefile('b1', 'b1')  hgt.hg(['add', '--bf', 'b1']) @@ -242,3 +240,64 @@
 hgt.writefile('b2', 'b22')  hgt.hg(['revert', 'b1'])  hgt.asserttrue(hgt.readfile('b2') == 'b22', 'file changed') +# Test that a newly added, uncommitted bfile can be reverted +hgt.announce('revert uncommitted files') +os.chdir('..') +os.mkdir('repo4') +os.chdir('repo4') +hgt.hg(['init', '-q']) +hgt.writefile('n1', 'n1') +hgt.hg(['add', 'n1']) +hgt.hg(['commit', '-m', 'add normal file']) +hgt.writefile('b1', 'b1') +hgt.hg(['add', '--bf', 'b1']) +hgt.hg(['revert', 'b1']) +hgt.hg(['status'], stdout='''? b1 +''') +hgt.hg(['add', 'b1']) +hgt.hg(['status'], stdout='''A b1 +''') +hgt.hg(['revert', 'b1']) +hgt.hg(['add', '--bf', 'b1']) +hgt.hg(['revert', '--all'], stdout='''forgetting .kbf/b1 +''') +hgt.hg(['status'], stdout='''? b1 +''') +hgt.hg(['add', 'b1']) +hgt.hg(['status'], stdout='''A b1 +''') +hgt.hg(['revert', 'b1']) +hgt.hg(['add', '--bf', 'b1']) +hgt.hg(['commit', '-m', 'add bfile']) +hgt.writefile('b2', 'b2') +hgt.writefile('b3', 'b3') +hgt.hg(['add', '--bf', 'b2']) +hgt.hg(['revert', 'b2']) +hgt.hg(['status'], stdout='''? b2 +? b3 +''') +hgt.hg(['add', '--bf', 'b2']) +hgt.hg(['revert', '--all'], stdout='''forgetting .kbf/b2 +''') +hgt.hg(['status'], stdout='''? b2 +? b3 +''') +hgt.hg(['add', '--bf'], stdout='''adding b2 as bfile +adding b3 as bfile +''') +hgt.hg(['revert', 'b3']) +hgt.hg(['status'], stdout='''A b2 +? b3 +''') +hgt.hg(['commit', '-m', 'add another bfile']) +hgt.hg(['rm', 'b2']) +hgt.assertfalse(os.path.exists('b2'), 'file shouldnt exist') +hgt.assertfalse(os.path.exists('.kbf/b2'), 'file shouldnt exist') +hgt.hg(['revert', 'b2']) +hgt.asserttrue(hgt.readfile('b2') == 'b2', 'file changed') +hgt.asserttrue(hgt.readfile('.kbf/b2') == '32f28ea03b1b20126629d2ca63fc6665b0bbb604\n', 'file changed') +hgt.hg(['rm', 'b2']) +hgt.hg(['revert', '--all'], stdout='''undeleting .kbf/b2 +''') +hgt.asserttrue(hgt.readfile('b2') == 'b2', 'file changed') +hgt.asserttrue(hgt.readfile('.kbf/b2') == '32f28ea03b1b20126629d2ca63fc6665b0bbb604\n', 'file changed')
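The new assertions at the end compare a reverted big file against its standin under .kbf/, which stores the SHA-1 of the content plus a trailing newline. A small helper in the same spirit, assuming that layout (the test suite's own readfile/asserttrue are what the script actually uses):

import hashlib

def standinmatches(filepath, standinpath):
    # True when the standin records the SHA-1 hex digest of the current
    # working-copy contents, followed by a newline.
    f = open(filepath, 'rb')
    try:
        expected = hashlib.sha1(f.read()).hexdigest() + '\n'
    finally:
        f.close()
    f = open(standinpath, 'rb')
    try:
        return f.read() == expected
    finally:
        f.close()

# e.g. standinmatches('b2', '.kbf/b2') should hold after 'hg revert b2'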
 
@@ -35,3 +35,36 @@
 hg add --bf b2  hg commit -m 'added second bfile'  hg revert b1 + +% revert uncommitted files +hg init -q +hg add n1 +hg commit -m 'add normal file' +hg add --bf b1 +hg revert b1 +hg status +hg add b1 +hg status +hg revert b1 +hg add --bf b1 +hg revert --all +hg status +hg add b1 +hg status +hg revert b1 +hg add --bf b1 +hg commit -m 'add bfile' +hg add --bf b2 +hg revert b2 +hg status +hg add --bf b2 +hg revert --all +hg status +hg add --bf +hg revert b3 +hg status +hg commit -m 'add another bfile' +hg rm b2 +hg revert b2 +hg rm b2 +hg revert --all
Change 1 of 1 · bfiles/tests/test-rollback.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python +# +# Test rollback + +import os +import common + +hgt = common.BfilesTester() + +hgt.updaterc() +hgt.announce('setup') +os.mkdir('repo1') +os.chdir('repo1') +hgt.hg(['init', '-q']) +hgt.writefile('b1', 'b1') +hgt.hg(['add', '--bf', 'b1']) +hgt.hg(['commit', '-m', 'add bfile']) +hgt.hg(['rollback'], + stdout='''repository tip rolled back to revision -1 (undo commit) +working directory now based on revision -1 +''') +hgt.hg(['status'], stdout='''A b1 +''') +hgt.hg(['commit', '-m', 'add bfile']) +hgt.writefile('b2', 'b2') +hgt.hg(['add', '--bf', 'b2']) +hgt.hg(['commit', '-m', 'add another bfile']) +hgt.hg(['rollback'], + stdout='''repository tip rolled back to revision 0 (undo commit) +working directory now based on revision 0 +''') +hgt.hg(['status'], stdout='''A b2 +''') +hgt.hg(['commit', '-m', 'add another bfile']) +hgt.writefile('b2', 'b22') +hgt.hg(['commit', '-m', 'modify bfile']) +hgt.hg(['rollback'], + stdout='''repository tip rolled back to revision 1 (undo commit) +working directory now based on revision 1 +''') +hgt.hg(['status'], stdout='''M b2 +''') +hgt.asserttrue(hgt.readfile('b2') == 'b22', 'file changed') +hgt.hg(['commit', '-m', 'modify bfile']) +hgt.hg(['rm', 'b2']) +hgt.hg(['commit', '-m', 'delete bfile']) +hgt.hg(['rollback'], + stdout='''repository tip rolled back to revision 2 (undo commit) +working directory now based on revision 2 +''') +hgt.hg(['status'], stdout='''! b2 +''') +hgt.asserttrue(hgt.readfile('.kbf/b2') == 'ad280552ca89b1d13baa498ef352e1eabaafdf28\n', 'file changed')
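The rollback test drives hg through a small helper object; only the methods used above (updaterc, announce, hg, writefile, readfile, asserttrue) are known from the script itself, so the following is a guess at the minimal shape of such a harness rather than the real common.BfilesTester:

import subprocess

class MiniTester(object):
    # Bare-bones stand-in for the test harness: run hg, optionally compare
    # its combined output against an expected transcript, fail loudly otherwise.
    def hg(self, args, stdout=None):
        proc = subprocess.Popen(['hg'] + args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                universal_newlines=True)
        out = proc.communicate()[0]
        if stdout is not None and out != stdout:
            raise AssertionError('unexpected output from hg %s:\n%s'
                                 % (' '.join(args), out))
        return out

    def writefile(self, name, data):
        f = open(name, 'w')
        try:
            f.write(data)
        finally:
            f.close()

    def readfile(self, name):
        f = open(name)
        try:
            return f.read()
        finally:
            f.close()

    def asserttrue(self, cond, msg):
        if not cond:
            raise AssertionError(msg)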
Change 1 of 1 · bfiles/tests/test-rollback.py.out
@@ -0,0 +1,20 @@
+% setup +hg init -q +hg add --bf b1 +hg commit -m 'add bfile' +hg rollback +hg status +hg commit -m 'add bfile' +hg add --bf b2 +hg commit -m 'add another bfile' +hg rollback +hg status +hg commit -m 'add another bfile' +hg commit -m 'modify bfile' +hg rollback +hg status +hg commit -m 'modify bfile' +hg rm b2 +hg commit -m 'delete bfile' +hg rollback +hg status