@@ -660,24 +660,12 @@ type Options struct {
660660 // compaction will never get triggered.
661661 MultiLevelCompactionHeuristic MultiLevelHeuristic
662662
663- // MaxWriterConcurrency is used to indicate the maximum number of
664- // compression workers the compression queue is allowed to use. If
665- // MaxWriterConcurrency > 0, then the Writer will use parallelism, to
666- // compress and write blocks to disk. Otherwise, the writer will
667- // compress and write blocks to disk synchronously.
668- MaxWriterConcurrency int
669-
670663 // ForceWriterParallelism is used to force parallelism in the sstable
671- // Writer for the metamorphic tests. Even with the MaxWriterConcurrency
672- // option set, we only enable parallelism in the sstable Writer if there
673- // is enough CPU available, and this option bypasses that.
664+ // Writer for the metamorphic tests. We only enable parallelism in the
665+ // sstable Writer if there is enough CPU available, and this option
666+ // bypasses that check.
674667 ForceWriterParallelism bool
675668
676- // CPUWorkPermissionGranter should be set if Pebble should be given the
677- // ability to optionally schedule additional CPU. See the documentation
678- // for CPUWorkPermissionGranter for more details.
679- CPUWorkPermissionGranter CPUWorkPermissionGranter
680-
681669 // EnableColumnarBlocks is used to decide whether to enable writing
682670 // TableFormatPebblev5 sstables. This setting is only respected by
683671 // FormatColumnarBlocks. In lower format major versions, the
@@ -1287,9 +1275,6 @@ func (o *Options) EnsureDefaults() {
12871275	if o.Experimental.FileCacheShards <= 0 {
12881276		o.Experimental.FileCacheShards = runtime.GOMAXPROCS(0)
12891277	}
1290-	if o.Experimental.CPUWorkPermissionGranter == nil {
1291-		o.Experimental.CPUWorkPermissionGranter = defaultCPUWorkGranter{}
1292-	}
12931278	if o.Experimental.MultiLevelCompactionHeuristic == nil {
12941279		o.Experimental.MultiLevelCompactionHeuristic = WriteAmpHeuristic{}
12951280	}
@@ -1433,7 +1418,6 @@ func (o *Options) String() string {
14331418	fmt.Fprintf(&buf, "  validate_on_ingest=%t\n", o.Experimental.ValidateOnIngest)
14341419	fmt.Fprintf(&buf, "  wal_dir=%s\n", o.WALDir)
14351420	fmt.Fprintf(&buf, "  wal_bytes_per_sync=%d\n", o.WALBytesPerSync)
1436-	fmt.Fprintf(&buf, "  max_writer_concurrency=%d\n", o.Experimental.MaxWriterConcurrency)
14371421	fmt.Fprintf(&buf, "  force_writer_parallelism=%t\n", o.Experimental.ForceWriterParallelism)
14381422	fmt.Fprintf(&buf, "  secondary_cache_size_bytes=%d\n", o.Experimental.SecondaryCacheSizeBytes)
14391423	fmt.Fprintf(&buf, "  create_on_shared=%d\n", o.Experimental.CreateOnShared)
@@ -1833,7 +1817,7 @@ func (o *Options) Parse(s string, hooks *ParseHooks) error {
18331817		case "wal_bytes_per_sync":
18341818			o.WALBytesPerSync, err = strconv.Atoi(value)
18351819		case "max_writer_concurrency":
1836-			o.Experimental.MaxWriterConcurrency, err = strconv.Atoi(value)
1820+			// Removed option; value accepted and ignored for backwards compatibility.
18371821		case "force_writer_parallelism":
18381822			o.Experimental.ForceWriterParallelism, err = strconv.ParseBool(value)
18391823		case "secondary_cache_size_bytes":
0 commit comments