introvoyz041 committed
Commit 06c5f80 · verified · 1 Parent(s): c381ac2

Migrated from GitHub

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. .gitattributes +83 -0
  2. data/.clang-format +121 -0
  3. data/CMakeLists.txt +653 -0
  4. data/CMakePresets.json +46 -0
  5. data/LICENSE +661 -0
  6. data/build-utils/CMakeLists.txt +50 -0
  7. data/build-utils/encoding-check.cpp +119 -0
  8. data/build_win.bat +515 -0
  9. data/bundled_deps/CMakeLists.txt +52 -0
  10. data/bundled_deps/admesh/CMakeLists.txt +16 -0
  11. data/bundled_deps/admesh/admesh/connect.cpp +743 -0
  12. data/bundled_deps/admesh/admesh/normals.cpp +239 -0
  13. data/bundled_deps/admesh/admesh/shared.cpp +288 -0
  14. data/bundled_deps/admesh/admesh/stl.h +343 -0
  15. data/bundled_deps/admesh/admesh/stl_io.cpp +251 -0
  16. data/bundled_deps/admesh/admesh/stlinit.cpp +281 -0
  17. data/bundled_deps/admesh/admesh/util.cpp +399 -0
  18. data/bundled_deps/agg/CMakeLists.txt +5 -0
  19. data/bundled_deps/agg/agg/AUTHORS +2 -0
  20. data/bundled_deps/agg/agg/VERSION +2 -0
  21. data/bundled_deps/agg/agg/agg_array.h +1119 -0
  22. data/bundled_deps/agg/agg/agg_basics.h +574 -0
  23. data/bundled_deps/agg/agg/agg_bezier_arc.h +159 -0
  24. data/bundled_deps/agg/agg/agg_clip_liang_barsky.h +333 -0
  25. data/bundled_deps/agg/agg/agg_color_gray.h +1047 -0
  26. data/bundled_deps/agg/agg/agg_color_rgba.h +1353 -0
  27. data/bundled_deps/agg/agg/agg_config.h +44 -0
  28. data/bundled_deps/agg/agg/agg_conv_transform.h +68 -0
  29. data/bundled_deps/agg/agg/agg_gamma_functions.h +132 -0
  30. data/bundled_deps/agg/agg/agg_gamma_lut.h +300 -0
  31. data/bundled_deps/agg/agg/agg_math.h +437 -0
  32. data/bundled_deps/agg/agg/agg_path_storage.h +1582 -0
  33. data/bundled_deps/agg/agg/agg_pixfmt_base.h +97 -0
  34. data/bundled_deps/agg/agg/agg_pixfmt_gray.h +738 -0
  35. data/bundled_deps/agg/agg/agg_pixfmt_rgb.h +995 -0
  36. data/bundled_deps/agg/agg/agg_rasterizer_cells_aa.h +741 -0
  37. data/bundled_deps/agg/agg/agg_rasterizer_scanline_aa.h +481 -0
  38. data/bundled_deps/agg/agg/agg_rasterizer_scanline_aa_nogamma.h +483 -0
  39. data/bundled_deps/agg/agg/agg_rasterizer_sl_clip.h +351 -0
  40. data/bundled_deps/agg/agg/agg_renderer_base.h +731 -0
  41. data/bundled_deps/agg/agg/agg_renderer_scanline.h +854 -0
  42. data/bundled_deps/agg/agg/agg_rendering_buffer.h +300 -0
  43. data/bundled_deps/agg/agg/agg_scanline_p.h +329 -0
  44. data/bundled_deps/agg/agg/agg_trans_affine.h +518 -0
  45. data/bundled_deps/agg/agg/copying +65 -0
  46. data/bundled_deps/ankerl/README.txt +7 -0
  47. data/bundled_deps/ankerl/ankerl/unordered_dense.h +1584 -0
  48. data/bundled_deps/avrdude/CMakeLists.txt +107 -0
  49. data/bundled_deps/avrdude/avrdude/AUTHORS +28 -0
  50. data/bundled_deps/avrdude/avrdude/BUILD-FROM-SVN +13 -0
.gitattributes CHANGED
@@ -57,3 +57,86 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ data/bundled_deps/avrdude/avrdude/atmel-docs/AVR910.pdf filter=lfs diff=lfs merge=lfs -text
+ data/bundled_deps/avrdude/avrdude/atmel-docs/AVRISPmkII-AVR069.pdf filter=lfs diff=lfs merge=lfs -text
+ data/bundled_deps/avrdude/avrdude/atmel-docs/JTAGICE-AVR060.pdf filter=lfs diff=lfs merge=lfs -text
+ data/bundled_deps/avrdude/avrdude/atmel-docs/JTAGICEmkII-AVR067.pdf filter=lfs diff=lfs merge=lfs -text
+ data/bundled_deps/avrdude/avrdude/atmel-docs/STK500-AVR061.pdf filter=lfs diff=lfs merge=lfs -text
+ data/bundled_deps/avrdude/avrdude/atmel-docs/STK500v2-AVR068.pdf filter=lfs diff=lfs merge=lfs -text
+ data/deps/+GMP/gmp/lib/win32/libgmp-10.dll filter=lfs diff=lfs merge=lfs -text
+ data/deps/+GMP/gmp/lib/win32/libgmp-10.lib filter=lfs diff=lfs merge=lfs -text
+ data/deps/+GMP/gmp/lib/win64/libgmp-10.dll filter=lfs diff=lfs merge=lfs -text
+ data/deps/+GMP/gmp/lib/win64/libgmp-10.lib filter=lfs diff=lfs merge=lfs -text
+ data/deps/+MPFR/mpfr/lib/win32/libmpfr-4.dll filter=lfs diff=lfs merge=lfs -text
+ data/deps/+MPFR/mpfr/lib/win64/libmpfr-4.dll filter=lfs diff=lfs merge=lfs -text
+ data/resources/data/printer_gantries/prusa3d_coreone_gantry.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/data/printer_gantries/prusa3d_mini_gantry.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/data/printer_gantries/prusa3d_mk3s_gantry.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/data/printer_gantries/prusa3d_mk4_gantry.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/data/printer_gantries/prusa3d_mk4s_gantry.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/data/printer_gantries/prusa3d_xl_gantry.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/fonts/NotoSans-Regular.ttf filter=lfs diff=lfs merge=lfs -text
+ data/resources/fonts/NotoSansCJK-Regular.ttc filter=lfs diff=lfs merge=lfs -text
+ data/resources/icons/PrusaSlicer-gcodeviewer.ico filter=lfs diff=lfs merge=lfs -text
+ data/resources/icons/PrusaSlicer.icns filter=lfs diff=lfs merge=lfs -text
+ data/resources/icons/PrusaSlicer.ico filter=lfs diff=lfs merge=lfs -text
+ data/resources/icons/gcode.icns filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/be/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/ca/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/cs/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/de/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/es/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/fi/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/fr/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/hu/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/it/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/ja/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/ko/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/ko_KR/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/ko_KR/PrusaSlicer_ko_KR.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/nl/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/pl/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/pt_BR/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/ru/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/sl/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/tr/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/uk/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/zh_CN/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/localization/zh_TW/PrusaSlicer.mo filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/Anycubic/AKLP_Bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/Anycubic/AK_Bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/Artillery/bed-x1.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/BIQU/BX_Bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/CocoaPress/minichef_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/FLSun/Q5_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/FLSun/QQSP_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/Geeetech/105x105.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/HartSmartProducts/hsp1_i_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/HartSmartProducts/hsp1_i_duplicator_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/LNL3D/LNL3D_D3_V2_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/LNL3D/LNL3D_D3_VULCAN_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/LNL3D/LNL3D_D3_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/LNL3D/LNL3D_D5_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/LNL3D/LNL3D_D6_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/MakerGear/M2_M3.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/PrusaResearch/coreone_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/PrusaResearchSLA/sl1_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/PrusaResearchSLA/sl1s_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/RatRig/rr-vc-300.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/RatRig/rr-vc-400.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/RatRig/rr-vc-500.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/Rigid3D/mucit2_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/Rigid3D/mucit_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/Rigid3D/zero_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/Sovol/SV06_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/TriLAB/aq_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/TriLAB/aqp_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/TriLAB/dq2_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/Trimaker/CosmosII_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/Trimaker/NebulaCloud_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/gCreate/gmax15p_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/profiles/gCreate/gmax2_bed.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/shapes/3DBenchy.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/shapes/M3x10_screw.stl filter=lfs diff=lfs merge=lfs -text
+ data/resources/shapes/torus.stl filter=lfs diff=lfs merge=lfs -text
+ data/tests/data/seam_test_object.3mf filter=lfs diff=lfs merge=lfs -text
data/.clang-format ADDED
@@ -0,0 +1,121 @@
+ #
+ # http://clang.llvm.org/docs/ClangFormatStyleOptions.html
+ #
+ Language: Cpp
+ AccessModifierOffset: -4
+ AlignAfterOpenBracket: BlockIndent
+ AlignConsecutiveAssignments: false
+ AlignConsecutiveDeclarations: false
+ AlignEscapedNewlines: DontAlign
+ AlignOperands: false
+ AlignTrailingComments: true
+ AllowAllParametersOfDeclarationOnNextLine: true
+ AllowShortBlocksOnASingleLine: true
+ AllowShortCaseLabelsOnASingleLine: true
+ AllowShortFunctionsOnASingleLine: All
+ AllowShortIfStatementsOnASingleLine: false
+ AllowShortLoopsOnASingleLine: false
+ AlwaysBreakAfterDefinitionReturnType: None
+ AlwaysBreakAfterReturnType: None
+ AlwaysBreakBeforeMultilineStrings: false
+ AlwaysBreakTemplateDeclarations: false
+ BinPackArguments: true
+ BinPackParameters: false
+ BraceWrapping:
+   AfterClass: true
+   AfterControlStatement: false
+   AfterEnum: false
+   AfterFunction: false
+   AfterNamespace: false
+   AfterObjCDeclaration: false
+   AfterStruct: true
+   AfterUnion: false
+   AfterExternBlock: false
+   BeforeCatch: false
+   BeforeElse: false
+   IndentBraces: false
+   SplitEmptyFunction: false
+   SplitEmptyRecord: false
+   SplitEmptyNamespace: false
+ BreakBeforeBinaryOperators: None
+ BreakBeforeBraces: Custom
+ BreakBeforeInheritanceComma: false
+ BreakInheritanceList: BeforeColon
+ BreakBeforeTernaryOperators: false
+ BreakConstructorInitializersBeforeComma: false
+ BreakConstructorInitializers: BeforeComma
+ BreakAfterJavaFieldAnnotations: false
+ BreakStringLiterals: true
+ ColumnLimit: 100
+ CommentPragmas: '^ IWYU pragma:'
+ CompactNamespaces: true
+ ConstructorInitializerAllOnOneLineOrOnePerLine: true
+ ConstructorInitializerIndentWidth: 4
+ ContinuationIndentWidth: 4
+ Cpp11BracedListStyle: true
+ DerivePointerAlignment: false
+ DisableFormat: false
+ ExperimentalAutoDetectBinPacking: false
+ FixNamespaceComments: true
+ ForEachMacros:
+   - forever
+   - foreach
+   - Q_FOREACH
+   - BOOST_FOREACH
+ IncludeBlocks: Preserve
+ IncludeCategories:
+   - Regex: '^<Q.*'
+     Priority: 200
+   - Regex: '^(<|"(gtest|gmock|isl|json)/)'
+     Priority: 3
+   - Regex: '.*'
+     Priority: 1
+ IncludeIsMainRegex: '(Test)?$'
+ IndentCaseLabels: false
+ IndentPPDirectives: None
+ IndentWidth: 4
+ IndentWrappedFunctionNames: false
+ JavaScriptQuotes: Leave
+ JavaScriptWrapImports: true
+ #KeepLineBreaksForNonEmptyLines: false
+ KeepEmptyLinesAtTheStartOfBlocks: false
+ MacroBlockBegin: ''
+ MacroBlockEnd: ''
+ MaxEmptyLinesToKeep: 1
+ NamespaceIndentation: None
+ ObjCBinPackProtocolList: Auto
+ ObjCBlockIndentWidth: 4
+ ObjCSpaceAfterProperty: false
+ ObjCSpaceBeforeProtocolList: true
+ PenaltyBreakAssignment: 150
+ PenaltyBreakBeforeFirstCallParameter: 300
+ PenaltyBreakComment: 500
+ PenaltyBreakFirstLessLess: 400
+ PenaltyBreakString: 600
+ PenaltyBreakTemplateDeclaration: 10
+ PenaltyExcessCharacter: 50
+ PenaltyReturnTypeOnItsOwnLine: 300
+ PenaltyIndentedWhitespace: 10
+ PointerAlignment: Right
+ ReflowComments: true
+ SortIncludes: false
+ SortUsingDeclarations: false
+ SpaceAfterCStyleCast: true
+ SpaceAfterTemplateKeyword: false
+ SpaceBeforeAssignmentOperators: true
+ SpaceBeforeCpp11BracedList: false
+ SpaceBeforeCtorInitializerColon: true
+ SpaceBeforeInheritanceColon: true
+ SpaceBeforeParens: ControlStatements
+ SpaceBeforeRangeBasedForLoopColon: true
+ SpaceInEmptyParentheses: false
+ SpacesBeforeTrailingComments: 1
+ SpacesInAngles: false
+ SpacesInContainerLiterals: false
+ SpacesInCStyleCastParentheses: false
+ SpacesInParentheses: false
+ SpacesInSquareBrackets: false
+ Standard: Cpp11
+ TabWidth: 4
+ UseTab: Never
+
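For context (not part of this commit): a style file like the one above is normally applied by running clang-format with --style=file from inside the repository, so it finds the nearest .clang-format. A minimal, hypothetical CMake sketch of a formatting helper target follows; the target name and source paths are made up for illustration.

    # Hypothetical convenience target; assumes clang-format is on PATH and that the
    # .clang-format above sits at the repository root so --style=file picks it up.
    find_program(CLANG_FORMAT_EXE clang-format)
    if (CLANG_FORMAT_EXE)
        add_custom_target(format_sources
            COMMAND ${CLANG_FORMAT_EXE} -i --style=file src/example_a.cpp src/example_b.cpp
            WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
            COMMENT "Reformat listed sources with the repository .clang-format"
            VERBATIM)
    endif ()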
data/CMakeLists.txt ADDED
@@ -0,0 +1,653 @@
1
+ #/|/ Copyright (c) Prusa Research 2017 - 2023 Tomáš Mészáros @tamasmeszaros, Vojtěch Bubník @bubnikv, Lukáš Matěna @lukasmatena, Filip Sykala @Jony01, Oleksandra Iushchenko @YuSanka, Lukáš Hejl @hejllukas, David Kocík @kocikdav, Enrico Turri @enricoturri1966, Vojtěch Král @vojtechkral
2
+ #/|/ Copyright (c) 2023 Ben Greiner
3
+ #/|/ Copyright (c) 2021 D-mo @dimitry-ishenko
4
+ #/|/ Copyright (c) 2020 Pascal de Bruijn @pmjdebruijn
5
+ #/|/ Copyright (c) 2019 Sam Segers
6
+ #/|/ Copyright (c) 2019 Colin Gilgenbach @hexane360
7
+ #/|/ Copyright (c) 2018 Dan Kortschak
8
+ #/|/
9
+ #/|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
10
+ #/|/
11
+ cmake_minimum_required(VERSION 3.13)
12
+ project(PrusaSlicer)
13
+
14
+ include("version.inc")
15
+ include(GNUInstallDirs)
16
+ include(CMakeDependentOption)
17
+
18
+ set(SLIC3R_RESOURCES_DIR "${CMAKE_CURRENT_SOURCE_DIR}/resources")
19
+ file(TO_NATIVE_PATH "${SLIC3R_RESOURCES_DIR}" SLIC3R_RESOURCES_DIR_WIN)
20
+
21
+ if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
22
+ message(STATUS "No build type selected, default to Release")
23
+ set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Build type (default Release)" FORCE)
24
+ endif()
25
+
26
+ if(DEFINED ENV{SLIC3R_STATIC})
27
+ set(SLIC3R_STATIC_INITIAL $ENV{SLIC3R_STATIC})
28
+ else()
29
+ if (MSVC OR MINGW OR APPLE)
30
+ set(SLIC3R_STATIC_INITIAL 1)
31
+ else()
32
+ set(SLIC3R_STATIC_INITIAL 0)
33
+ endif()
34
+ endif()
35
+
36
+ option(SLIC3R_STATIC "Compile PrusaSlicer with static libraries (Boost, TBB, glew)" ${SLIC3R_STATIC_INITIAL})
37
+ option(SLIC3R_GUI "Compile PrusaSlicer with GUI components (OpenGL, wxWidgets)" 1)
38
+ option(SLIC3R_FHS "Assume PrusaSlicer is to be installed in a FHS directory structure" 0)
39
+ option(SLIC3R_PCH "Use precompiled headers" 1)
40
+ option(SLIC3R_MSVC_COMPILE_PARALLEL "Compile on Visual Studio in parallel" 1)
41
+ option(SLIC3R_ASAN "Enable ASan on Clang and GCC" 0)
42
+ option(SLIC3R_UBSAN "Enable UBSan on Clang and GCC" 0)
43
+ option(SLIC3R_ENABLE_FORMAT_STEP "Enable compilation of STEP file support" ON)
44
+ option(SLIC3R_LOG_TO_FILE "Enable logging into file")
45
+ option(SLIC3R_REPO_URL "Preset repo URL")
46
+
47
+ # SLIC3R_OPENGL_ES can be enabled only if SLIC3R_GUI is enabled.
48
+ CMAKE_DEPENDENT_OPTION(SLIC3R_OPENGL_ES "Compile PrusaSlicer targeting OpenGL ES" OFF "SLIC3R_GUI" OFF)
49
+
50
+ # If SLIC3R_FHS is 1 -> SLIC3R_DESKTOP_INTEGRATION is always 0, otherwise variable.
51
+ CMAKE_DEPENDENT_OPTION(SLIC3R_DESKTOP_INTEGRATION "Allow perfoming desktop integration during runtime" 1 "NOT SLIC3R_FHS" 0)
52
+
53
+ set(OPENVDB_FIND_MODULE_PATH "" CACHE PATH "Path to OpenVDB installation's find modules.")
54
+
55
+ set(SLIC3R_GTK "2" CACHE STRING "GTK version to use with wxWidgets on Linux")
56
+
57
+ set(IS_CROSS_COMPILE FALSE)
58
+
59
+ if (SLIC3R_STATIC)
60
+ # Prefer config scripts over find modules. This is helpful when building with
61
+ # the static dependencies. Many libraries have their own export scripts
62
+ # while having a Find<PkgName> module in standard cmake installation.
63
+ # (e.g. CURL)
64
+ set(CMAKE_FIND_PACKAGE_PREFER_CONFIG ON)
65
+ endif ()
66
+
67
+ # Dependency build management
68
+ option(${PROJECT_NAME}_BUILD_DEPS "Build dependencies before the project" OFF)
69
+ option(${PROJECT_NAME}_DEPS_OUTPUT_QUIET "Don't print build output for dependencies" OFF)
70
+ set(${PROJECT_NAME}_DEPS_PRESET "default" CACHE STRING "Preset of the dependencies when ${PROJECT_NAME}_BUILD_DEPS is ON")
71
+ set(${PROJECT_NAME}_DEPS_BUILD_DIR "" CACHE PATH "Binary dir of the dependencies build when ${PROJECT_NAME}_BUILD_DEPS is ON")
72
+ if (${PROJECT_NAME}_BUILD_DEPS)
73
+ include(deps/autobuild.cmake)
74
+ endif ()
75
+
76
+ if (APPLE)
77
+ set(CMAKE_FIND_FRAMEWORK LAST)
78
+ set(CMAKE_FIND_APPBUNDLE LAST)
79
+ list(FIND CMAKE_OSX_ARCHITECTURES ${CMAKE_SYSTEM_PROCESSOR} _arch_idx)
80
+ if (CMAKE_OSX_ARCHITECTURES AND _arch_idx LESS 0)
81
+ set(IS_CROSS_COMPILE TRUE)
82
+ endif ()
83
+ endif ()
84
+
85
+ option(SLIC3R_BUILD_SANDBOXES "Build development sandboxes" OFF)
86
+ option(SLIC3R_BUILD_TESTS "Build unit tests" ON)
87
+
88
+ if (IS_CROSS_COMPILE)
89
+ message("Detected cross compilation setup. Tests and encoding checks will be forcedly disabled!")
90
+ set(SLIC3R_BUILD_TESTS OFF CACHE BOOL "" FORCE)
91
+ endif ()
92
+
93
+ # Print out the SLIC3R_* cache options
94
+ get_cmake_property(_cache_vars CACHE_VARIABLES)
95
+ list (SORT _cache_vars)
96
+ foreach (_cache_var ${_cache_vars})
97
+ if("${_cache_var}" MATCHES "^SLIC3R_")
98
+ message(STATUS "${_cache_var}: ${${_cache_var}}")
99
+ endif ()
100
+ endforeach()
101
+
102
+ if (SLIC3R_LOG_TO_FILE)
103
+ add_definitions(-DSLIC3R_LOG_TO_FILE)
104
+ endif ()
105
+ if (SLIC3R_REPO_URL)
106
+ add_definitions(-DSLIC3R_REPO_URL="${SLIC3R_REPO_URL}")
107
+ endif()
108
+ if (SLIC3R_GUI)
109
+ add_definitions(-DSLIC3R_GUI)
110
+ endif ()
111
+
112
+ if (SLIC3R_OPENGL_ES)
113
+ add_definitions(-DSLIC3R_OPENGL_ES)
114
+ endif()
115
+
116
+ if(SLIC3R_DESKTOP_INTEGRATION)
117
+ add_definitions(-DSLIC3R_DESKTOP_INTEGRATION)
118
+ endif ()
119
+
120
+ if (MSVC AND CMAKE_CXX_COMPILER_ID STREQUAL Clang)
121
+ set(IS_CLANG_CL TRUE)
122
+
123
+ # clang-cl can interpret SYSTEM header paths if -imsvc is used
124
+ set(CMAKE_INCLUDE_SYSTEM_FLAG_CXX "-imsvc")
125
+
126
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall \
127
+ -Wno-old-style-cast -Wno-reserved-id-macro -Wno-c++98-compat-pedantic")
128
+ else ()
129
+ set(IS_CLANG_CL FALSE)
130
+ endif ()
131
+
132
+ if (MSVC)
133
+ if (SLIC3R_MSVC_COMPILE_PARALLEL AND NOT IS_CLANG_CL)
134
+ add_compile_options(/MP)
135
+ endif ()
136
+ # /bigobj (Increase Number of Sections in .Obj file)
137
+ # error C3859: virtual memory range for PCH exceeded; please recompile with a command line option of '-Zm90' or greater
138
+ # Generate symbols at every build target, even for the release.
139
+ add_compile_options(-bigobj -Zm520 /Zi)
140
+ # Disable STL4007: Many result_type typedefs and all argument_type, first_argument_type, and second_argument_type typedefs are deprecated in C++17.
141
+ #FIXME Remove this line after eigen library adapts to the new C++17 adaptor rules.
142
+ add_compile_options(-D_SILENCE_CXX17_ADAPTOR_TYPEDEFS_DEPRECATION_WARNING)
143
+ # Disable warnings on conversion from unsigned to signed (possible loss of data)
144
+ # C4244: 'conversion' conversion from 'type1' to 'type2', possible loss of data. An integer type is converted to a smaller integer type.
145
+ # C4267: The compiler detected a conversion from size_t to a smaller type.
146
+ add_compile_options(/wd4244 /wd4267)
147
+ # Enforce strict C++ conformance, so our code that compiles on MSVC also compiles on GCC and clang.
148
+ add_compile_options(/permissive-)
149
+ endif ()
150
+
151
+ if (MINGW)
152
+ add_compile_options(-Wa,-mbig-obj)
153
+ endif ()
154
+
155
+ if (NOT MSVC)
156
+ # ARMs (Raspberry PI) use an unsigned char by default. Let's make it consistent for PrusaSlicer on all platforms.
157
+ add_compile_options(-fsigned-char)
158
+ endif ()
159
+
160
+ # Display and check CMAKE_PREFIX_PATH
161
+ message(STATUS "SLIC3R_STATIC: ${SLIC3R_STATIC}")
162
+ if (NOT "${CMAKE_PREFIX_PATH}" STREQUAL "")
163
+ message(STATUS "CMAKE_PREFIX_PATH: ${CMAKE_PREFIX_PATH} (from cache or command line)")
164
+ set(PREFIX_PATH_CHECK ${CMAKE_PREFIX_PATH})
165
+ elseif (NOT "$ENV{CMAKE_PREFIX_PATH}" STREQUAL "")
166
+ message(STATUS "CMAKE_PREFIX_PATH: $ENV{CMAKE_PREFIX_PATH} (from environment)")
167
+ set(PREFIX_PATH_CHECK $ENV{CMAKE_PREFIX_PATH})
168
+ else ()
169
+ message(STATUS "CMAKE_PREFIX_PATH: (default)")
170
+ endif ()
171
+
172
+ foreach (DIR ${PREFIX_PATH_CHECK})
173
+ if (NOT EXISTS "${DIR}")
174
+ message(WARNING "CMAKE_PREFIX_PATH element doesn't exist: ${DIR}")
175
+ endif ()
176
+ endforeach ()
177
+
178
+ # Add our own cmake module path.
179
+ list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/modules/)
180
+
181
+ enable_testing ()
182
+
183
+ # Enable C++17 language standard.
184
+ set(CMAKE_CXX_STANDARD 17)
185
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
186
+
187
+ if(NOT WIN32)
188
+ # Add DEBUG flags to debug builds.
189
+ add_compile_options("$<$<CONFIG:DEBUG>:-DDEBUG>")
190
+ endif()
191
+
192
+ # WIN10SDK_PATH is used to point CMake to the WIN10 SDK installation directory.
193
+ # We pick it from environment if it is not defined in another way
194
+ if(WIN32)
195
+ if(NOT DEFINED WIN10SDK_PATH)
196
+ if(DEFINED ENV{WIN10SDK_PATH})
197
+ set(WIN10SDK_PATH "$ENV{WIN10SDK_PATH}")
198
+ endif()
199
+ endif()
200
+ if(DEFINED WIN10SDK_PATH)
201
+ if (EXISTS "${WIN10SDK_PATH}/include/winrt/windows.graphics.printing3d.h")
202
+ set(WIN10SDK_INCLUDE_PATH "${WIN10SDK_PATH}/Include")
203
+ else()
204
+ message("WIN10SDK_PATH is invalid: ${WIN10SDK_PATH}")
205
+ message("${WIN10SDK_PATH}/include/winrt/windows.graphics.printing3d.h was not found")
206
+ message("STL fixing by WinSDK will not be compiled")
207
+ unset(WIN10SDK_PATH)
208
+ endif()
209
+ else()
210
+ # Try to use the default Windows 10 SDK path.
211
+ set(WIN10SDK_INCLUDE_PATH "$ENV{WindowsSdkDir}/Include/$ENV{WindowsSDKVersion}")
212
+ if (NOT EXISTS "${WIN10SDK_INCLUDE_PATH}/winrt/windows.graphics.printing3d.h")
213
+ message("${WIN10SDK_INCLUDE_PATH}/winrt/windows.graphics.printing3d.h was not found")
214
+ message("STL fixing by WinSDK will not be compiled")
215
+ unset(WIN10SDK_INCLUDE_PATH)
216
+ endif()
217
+ endif()
218
+ if(WIN10SDK_INCLUDE_PATH)
219
+ message("Building with Win10 STL fixing service support")
220
+ add_definitions(-DHAS_WIN10SDK)
221
+ include_directories("${WIN10SDK_INCLUDE_PATH}")
222
+ else()
223
+ message("Building without Win10 STL fixing service support")
224
+ endif()
225
+ endif()
226
+
227
+ if (APPLE)
228
+ message("OS X SDK Path: ${CMAKE_OSX_SYSROOT}")
229
+ if (CMAKE_OSX_DEPLOYMENT_TARGET)
230
+ message("OS X Deployment Target: ${CMAKE_OSX_DEPLOYMENT_TARGET}")
231
+ else ()
232
+ message("OS X Deployment Target: (default)")
233
+ endif ()
234
+ endif ()
235
+
236
+ if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
237
+ find_package(PkgConfig REQUIRED)
238
+
239
+ if (CMAKE_VERSION VERSION_LESS "3.1")
240
+ # Workaround for an old CMake, which does not understand CMAKE_CXX_STANDARD.
241
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
242
+ endif()
243
+
244
+ # Boost on Raspberry-Pi does not link to pthreads.
245
+ set(THREADS_PREFER_PTHREAD_FLAG ON)
246
+ find_package(Threads REQUIRED)
247
+
248
+ find_package(DBus1 REQUIRED)
249
+ endif()
250
+
251
+ if (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUXX)
252
+ # Adding -fext-numeric-literals to enable GCC extensions on definitions of quad float literals, which are required by Boost.
253
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fext-numeric-literals" )
254
+ endif()
255
+
256
+ if (NOT MSVC AND ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang"))
257
+ if (NOT MINGW)
258
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall" )
259
+ endif ()
260
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reorder" )
261
+
262
+ # On GCC and Clang, no return from a non-void function is a warning only. Here, we make it an error.
263
+ add_compile_options(-Werror=return-type)
264
+
265
+ # removes LOTS of extraneous Eigen warnings (GCC only supports it since 6.1)
266
+ # https://eigen.tuxfamily.org/bz/show_bug.cgi?id=1221
267
+ if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" OR CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 6.0)
268
+ add_compile_options(-Wno-ignored-attributes) # Tamas: Eigen include dirs are marked as SYSTEM
269
+ endif()
270
+
271
+ # Clang reports legacy OpenGL calls as deprecated. Turn off the warning for now
272
+ # to reduce the clutter, we know about this one. It should be reenabled after
273
+ # we finally get rid of the deprecated code.
274
+ if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
275
+ add_compile_options(-Wno-deprecated-declarations)
276
+ endif()
277
+
278
+ # Clang reports misleading indentation for some IF blocks because of mixing tabs with spaces.
279
+ if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
280
+ add_compile_options(-Wno-misleading-indentation)
281
+ endif()
282
+
283
+ #GCC generates loads of -Wunknown-pragmas when compiling igl. The fix is not easy due to a bug in gcc, see
284
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66943 or
285
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53431
286
+ # We will turn the warning off for GCC for now:
287
+ if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
288
+ add_compile_options(-Wno-unknown-pragmas)
289
+ endif()
290
+
291
+ endif()
292
+
293
+ if (SLIC3R_ASAN)
294
+ # ASAN should be available on MSVC starting with Visual Studio 2019 16.9
295
+ # https://devblogs.microsoft.com/cppblog/address-sanitizer-for-msvc-now-generally-available/
296
+ add_compile_options(-fsanitize=address)
297
+
298
+ if (NOT MSVC)
299
+ add_compile_options(-fno-omit-frame-pointer)
300
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
301
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=address")
302
+ set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fsanitize=address")
303
+ endif ()
304
+
305
+ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
306
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lasan")
307
+ endif ()
308
+ endif ()
309
+
310
+ if (SLIC3R_UBSAN)
311
+ # Stacktrace for every report is enabled by default. It can be disabled by running PrusaSlicer with "UBSAN_OPTIONS=print_stacktrace=0".
312
+
313
+ # Define macro SLIC3R_UBSAN to allow detection in the source code if this sanitizer is enabled.
314
+ add_compile_definitions(SLIC3R_UBSAN)
315
+
316
+ # Clang supports much more useful checks than GCC, so when Clang is detected, another checks will be enabled.
317
+ # List of what GCC is checking: https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html
318
+ # List of what Clang is checking: https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html
319
+ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
320
+ set(_ubsan_flags "-fsanitize=undefined,integer")
321
+ else ()
322
+ set(_ubsan_flags "-fsanitize=undefined")
323
+ endif ()
324
+
325
+ add_compile_options(${_ubsan_flags} -fno-omit-frame-pointer)
326
+
327
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${_ubsan_flags}")
328
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${_ubsan_flags}")
329
+ set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${_ubsan_flags}")
330
+
331
+ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
332
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lubsan")
333
+ endif ()
334
+ endif ()
335
+
336
+ if (APPLE)
337
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror=partial-availability -Werror=unguarded-availability -Werror=unguarded-availability-new")
338
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror=partial-availability -Werror=unguarded-availability -Werror=unguarded-availability-new")
339
+ endif ()
340
+
341
+ set(LIBDIR_BIN ${CMAKE_CURRENT_BINARY_DIR}/src)
342
+ # For generated header files
343
+ include_directories(${LIBDIR_BIN}/platform)
344
+
345
+ if(WIN32)
346
+ add_definitions(-D_USE_MATH_DEFINES -D_WIN32 -D_CRT_SECURE_NO_WARNINGS -D_SCL_SECURE_NO_WARNINGS)
347
+ if(MSVC)
348
+ # BOOST_ALL_NO_LIB: Avoid the automatic linking of Boost libraries on Windows. Rather rely on explicit linking.
349
+ add_definitions(-DBOOST_ALL_NO_LIB -DBOOST_USE_WINAPI_VERSION=0x601 -DBOOST_SYSTEM_USE_UTF8 )
350
+ # Force the source code encoding to UTF-8. See PrusaSlicer GH pull request #5583
351
+ add_compile_options("$<$<C_COMPILER_ID:MSVC>:/utf-8>")
352
+ add_compile_options("$<$<CXX_COMPILER_ID:MSVC>:/utf-8>")
353
+ endif(MSVC)
354
+ endif(WIN32)
355
+
356
+ add_definitions(-DwxUSE_UNICODE -D_UNICODE -DUNICODE -DWXINTL_NO_GETTEXT_MACRO)
357
+
358
+ # Disable unsafe implicit wxString to const char* / std::string and vice versa. This implicit conversion breaks the UTF-8 encoding quite often.
359
+ add_definitions(-DwxNO_UNSAFE_WXSTRING_CONV)
360
+
361
+ # Find and configure boost
362
+ if(SLIC3R_STATIC)
363
+ # Use static boost libraries.
364
+ set(Boost_USE_STATIC_LIBS ON)
365
+ # Use boost libraries linked statically to the C++ runtime.
366
+ # set(Boost_USE_STATIC_RUNTIME ON)
367
+ endif()
368
+ #set(Boost_DEBUG ON)
369
+ # set(Boost_COMPILER "-mgw81")
370
+ # boost::process was introduced first in version 1.64.0,
371
+ # boost::beast::detail::base64 was introduced first in version 1.66.0
372
+ set(MINIMUM_BOOST_VERSION "1.83.0")
373
+ set(_boost_components "system;filesystem;thread;log;locale;regex;chrono;atomic;date_time;iostreams;nowide")
374
+ find_package(Boost ${MINIMUM_BOOST_VERSION} REQUIRED COMPONENTS ${_boost_components})
375
+
376
+ find_package(Eigen3 3.3.7 REQUIRED)
377
+
378
+ add_library(boost_libs INTERFACE)
379
+ add_library(boost_headeronly INTERFACE)
380
+
381
+ if (APPLE)
382
+ # BOOST_ASIO_DISABLE_KQUEUE : prevents a Boost ASIO bug on OS X: https://svn.boost.org/trac/boost/ticket/5339
383
+ target_compile_definitions(boost_headeronly INTERFACE BOOST_ASIO_DISABLE_KQUEUE)
384
+ endif()
385
+
386
+ if(NOT SLIC3R_STATIC)
387
+ target_compile_definitions(boost_headeronly INTERFACE BOOST_LOG_DYN_LINK)
388
+ endif()
389
+
390
+ function(slic3r_remap_configs targets from_Cfg to_Cfg)
391
+ if(MSVC)
392
+ string(TOUPPER ${from_Cfg} from_CFG)
393
+
394
+ foreach(tgt ${targets})
395
+ if(TARGET ${tgt})
396
+ set_target_properties(${tgt} PROPERTIES MAP_IMPORTED_CONFIG_${from_CFG} ${to_Cfg})
397
+ endif()
398
+ endforeach()
399
+ endif()
400
+ endfunction()
401
+
402
+ if(TARGET Boost::system)
403
+ message(STATUS "Boost::boost exists")
404
+ target_link_libraries(boost_headeronly INTERFACE Boost::boost)
405
+
406
+ # Only from cmake 3.12
407
+ # list(TRANSFORM _boost_components PREPEND Boost:: OUTPUT_VARIABLE _boost_targets)
408
+ set(_boost_targets "")
409
+ foreach(comp ${_boost_components})
410
+ list(APPEND _boost_targets "Boost::${comp}")
411
+ endforeach()
412
+
413
+ target_link_libraries(boost_libs INTERFACE
414
+ boost_headeronly # includes the custom compile definitions as well
415
+ ${_boost_targets}
416
+ )
417
+ slic3r_remap_configs("${_boost_targets}" RelWithDebInfo Release)
418
+ else()
419
+ target_include_directories(boost_headeronly INTERFACE ${Boost_INCLUDE_DIRS})
420
+ target_link_libraries(boost_libs INTERFACE boost_headeronly ${Boost_LIBRARIES})
421
+ endif()
422
+
423
+
424
+
425
+ # Find and configure intel-tbb
426
+ if(SLIC3R_STATIC)
427
+ set(TBB_STATIC 1)
428
+ endif()
429
+ set(TBB_DEBUG 1)
430
+ find_package(TBB REQUIRED)
431
+ slic3r_remap_configs(TBB::tbb RelWithDebInfo Release)
432
+ slic3r_remap_configs(TBB::tbbmalloc RelWithDebInfo Release)
433
+ # include_directories(${TBB_INCLUDE_DIRS})
434
+ # add_definitions(${TBB_DEFINITIONS})
435
+ # if(MSVC)
436
+ # # Suppress implicit linking of the TBB libraries by the Visual Studio compiler.
437
+ # add_definitions(-D__TBB_NO_IMPLICIT_LINKAGE)
438
+ # endif()
439
+ # The Intel TBB library will use the std::exception_ptr feature of C++11.
440
+ # add_definitions(-DTBB_USE_CAPTURED_EXCEPTION=0)
441
+
442
+ find_package(CURL REQUIRED)
443
+
444
+ add_library(libcurl INTERFACE)
445
+ target_link_libraries(libcurl INTERFACE CURL::libcurl)
446
+
447
+ # Fixing curl's cmake config script bugs
448
+ if (NOT WIN32)
449
+ # Required by libcurl
450
+ find_package(ZLIB REQUIRED)
451
+ target_link_libraries(libcurl INTERFACE ZLIB::ZLIB)
452
+ else()
453
+ target_link_libraries(libcurl INTERFACE crypt32)
454
+ endif()
455
+
456
+ ## OPTIONAL packages
457
+
458
+ # Find expat. We have our overridden FindEXPAT which exports the libexpat target
459
+ # no matter what.
460
+ find_package(EXPAT REQUIRED)
461
+
462
+ add_library(libexpat INTERFACE)
463
+
464
+ if (TARGET EXPAT::EXPAT ) # found by a newer Find script
465
+ target_link_libraries(libexpat INTERFACE EXPAT::EXPAT)
466
+ elseif(TARGET expat::expat) # found by a config script
467
+ target_link_libraries(libexpat INTERFACE expat::expat)
468
+ else() # found by an older Find script
469
+ target_link_libraries(libexpat INTERFACE ${EXPAT_LIBRARIES})
470
+ endif ()
471
+
472
+ find_package(PNG REQUIRED)
473
+
474
+ set(OpenGL_GL_PREFERENCE "LEGACY")
475
+ find_package(OpenGL REQUIRED)
476
+
477
+ # Find glew or use bundled version
478
+ if (SLIC3R_STATIC AND NOT SLIC3R_STATIC_EXCLUDE_GLEW)
479
+ set(GLEW_USE_STATIC_LIBS ON)
480
+ set(GLEW_VERBOSE ON)
481
+ endif()
482
+
483
+ find_package(GLEW REQUIRED)
484
+
485
+ # Find the Cereal serialization library
486
+ find_package(cereal REQUIRED)
487
+ add_library(libcereal INTERFACE)
488
+ if (NOT TARGET cereal::cereal)
489
+ target_link_libraries(libcereal INTERFACE cereal)
490
+ else()
491
+ target_link_libraries(libcereal INTERFACE cereal::cereal)
492
+ endif()
493
+
494
+ # l10n
495
+ set(L10N_DIR "${SLIC3R_RESOURCES_DIR}/localization")
496
+ add_custom_target(gettext_make_pot
497
+ COMMAND xgettext --keyword=L --keyword=_L --keyword=_u8L --keyword=L_CONTEXT:1,2c --keyword=_L_PLURAL:1,2 --add-comments=TRN --from-code=UTF-8 --debug --boost
498
+ -f "${L10N_DIR}/list.txt"
499
+ -o "${L10N_DIR}/PrusaSlicer.pot"
500
+ COMMAND hintsToPot ${SLIC3R_RESOURCES_DIR} ${L10N_DIR}
501
+ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
502
+ COMMENT "Generate pot file from strings in the source tree"
503
+ )
504
+
505
+ add_custom_target(gettext_merge_community_po_with_pot
506
+ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
507
+ COMMENT "Merge community po with new generated pot file"
508
+ )
509
+ file(GLOB L10N_PO_FILES "${L10N_DIR}/*/PrusaSlicer*.po")
510
+ # list of names of directories, which are licalized by PS internally
511
+ list(APPEND PS_L10N_DIRS "cs" "de" "es" "fr" "it" "ja" "pl")
512
+ foreach(po_file ${L10N_PO_FILES})
513
+ GET_FILENAME_COMPONENT(po_dir "${po_file}" DIRECTORY)
514
+ GET_FILENAME_COMPONENT(po_dir_name "${po_dir}" NAME)
515
+ list(FIND PS_L10N_DIRS ${po_dir_name} found_dir_id)
516
+ # found_dir_id==-1 means that po_dir_name wasn't found in PS_L10N_DIRS
517
+ if(found_dir_id LESS 0)
518
+ add_custom_command(
519
+ TARGET gettext_merge_community_po_with_pot PRE_BUILD
520
+ COMMAND msgmerge -N -o ${po_file} ${po_file} "${L10N_DIR}/PrusaSlicer.pot"
521
+ # delete obsolete lines from resulting PO to avoid conflicts after a merging of it with wxWidgets.po
522
+ COMMAND msgattrib --no-obsolete -o ${po_file} ${po_file}
523
+ DEPENDS ${po_file}
524
+ )
525
+ endif()
526
+ endforeach()
527
+
528
+ add_custom_target(gettext_concat_wx_po_with_po
529
+ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
530
+ COMMENT "Concatenate and merge wxWidgets localization po with PrusaSlicer po file"
531
+ )
532
+ file(GLOB L10N_PO_FILES "${L10N_DIR}/*/PrusaSlicer*.po")
533
+ foreach(po_file ${L10N_PO_FILES})
534
+ GET_FILENAME_COMPONENT(po_dir "${po_file}" DIRECTORY)
535
+ GET_FILENAME_COMPONENT(po_dir_name "${po_dir}" NAME)
536
+ SET(wx_po_file "${L10N_DIR}/wx_locale/${po_dir_name}.po")
537
+ #SET(po_new_file "${po_dir}/PrusaSlicer_.po")
538
+ add_custom_command(
539
+ TARGET gettext_concat_wx_po_with_po PRE_BUILD
540
+ COMMAND msgcat --use-first -o ${po_file} ${po_file} ${wx_po_file}
541
+ # delete obsolete lines from resulting PO
542
+ COMMAND msgattrib --no-obsolete -o ${po_file} ${po_file}
543
+ DEPENDS ${po_file}
544
+ )
545
+ endforeach()
546
+
547
+ add_custom_target(gettext_po_to_mo
548
+ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
549
+ COMMENT "Generate localization mo files (binary) from po files (texts)"
550
+ )
551
+ file(GLOB L10N_PO_FILES "${L10N_DIR}/*/PrusaSlicer*.po")
552
+ foreach(po_file ${L10N_PO_FILES})
553
+ GET_FILENAME_COMPONENT(po_dir "${po_file}" DIRECTORY)
554
+ SET(mo_file "${po_dir}/PrusaSlicer.mo")
555
+ add_custom_command(
556
+ TARGET gettext_po_to_mo PRE_BUILD
557
+ COMMAND msgfmt ARGS --check-format -o ${mo_file} ${po_file}
558
+ #COMMAND msgfmt ARGS --check-compatibility -o ${mo_file} ${po_file}
559
+ DEPENDS ${po_file}
560
+ )
561
+ endforeach()
562
+
563
+ find_package(NLopt 1.4 REQUIRED)
564
+ slic3r_remap_configs(NLopt::nlopt RelWithDebInfo Release)
565
+
566
+ if(SLIC3R_STATIC)
567
+ set(OPENVDB_USE_STATIC_LIBS ON)
568
+ set(USE_BLOSC TRUE)
569
+ endif ()
570
+
571
+ find_package(OpenVDB 5.0 COMPONENTS openvdb)
572
+ if(OpenVDB_FOUND)
573
+ slic3r_remap_configs(IlmBase::Half RelWithDebInfo Release)
574
+ slic3r_remap_configs(Blosc::blosc RelWithDebInfo Release)
575
+ else ()
576
+ message(FATAL_ERROR "OpenVDB could not be found with the bundled find module. "
577
+ "You can try to specify the find module location of your "
578
+ "OpenVDB installation with the OPENVDB_FIND_MODULE_PATH cache variable.")
579
+ endif ()
580
+
581
+ set(TOP_LEVEL_PROJECT_DIR ${PROJECT_SOURCE_DIR})
582
+ function(prusaslicer_copy_dlls target)
583
+ if ("${CMAKE_SIZEOF_VOID_P}" STREQUAL "8")
584
+ set(_bits 64)
585
+ elseif ("${CMAKE_SIZEOF_VOID_P}" STREQUAL "4")
586
+ set(_bits 32)
587
+ endif ()
588
+
589
+ get_property(_is_multi GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
590
+ get_target_property(_alt_out_dir ${target} RUNTIME_OUTPUT_DIRECTORY)
591
+
592
+ if (_alt_out_dir)
593
+ set (_out_dir "${_alt_out_dir}")
594
+ elseif (_is_multi)
595
+ set (_out_dir "$<TARGET_PROPERTY:${target},BINARY_DIR>/$<CONFIG>")
596
+ else ()
597
+ set (_out_dir "$<TARGET_PROPERTY:${target},BINARY_DIR>")
598
+ endif ()
599
+
600
+ # This has to be a separate target due to the Windows command line length limits
601
+ add_custom_command(TARGET ${target} POST_BUILD
602
+ COMMAND ${CMAKE_COMMAND} -E copy ${TOP_LEVEL_PROJECT_DIR}/deps/+GMP/gmp/lib/win${_bits}/libgmp-10.dll ${_out_dir}
603
+ COMMENT "Copy gmp runtime to build tree"
604
+ VERBATIM)
605
+
606
+ add_custom_command(TARGET ${target} POST_BUILD
607
+ COMMAND ${CMAKE_COMMAND} -E copy ${TOP_LEVEL_PROJECT_DIR}/deps/+MPFR/mpfr/lib/win${_bits}/libmpfr-4.dll ${_out_dir}
608
+ COMMENT "Copy mpfr runtime to build tree"
609
+ VERBATIM)
610
+ endfunction()
611
+
612
+ add_subdirectory(build-utils)
613
+ add_subdirectory(bundled_deps)
614
+ # libslic3r, PrusaSlicer GUI and the PrusaSlicer executable.
615
+ add_subdirectory(src)
616
+ set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT PrusaSlicer_app_console)
617
+
618
+ add_dependencies(gettext_make_pot hintsToPot)
619
+
620
+ if(SLIC3R_BUILD_SANDBOXES)
621
+ add_subdirectory(sandboxes)
622
+ endif()
623
+
624
+ if(SLIC3R_BUILD_TESTS)
625
+ add_subdirectory(tests)
626
+ endif()
627
+
628
+
629
+ if (WIN32)
630
+ install(DIRECTORY "${SLIC3R_RESOURCES_DIR}/" DESTINATION "${CMAKE_INSTALL_PREFIX}/resources")
631
+ elseif (SLIC3R_FHS)
632
+ # CMAKE_INSTALL_FULL_DATAROOTDIR: read-only architecture-independent data root (share)
633
+ set(SLIC3R_FHS_RESOURCES "${CMAKE_INSTALL_FULL_DATAROOTDIR}/PrusaSlicer")
634
+ install(DIRECTORY ${SLIC3R_RESOURCES_DIR}/ DESTINATION ${SLIC3R_FHS_RESOURCES}
635
+ PATTERN "*/udev" EXCLUDE
636
+ )
637
+ install(FILES src/platform/unix/PrusaSlicer.desktop DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/applications)
638
+ install(FILES src/platform/unix/PrusaGcodeviewer.desktop DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/applications)
639
+ foreach(SIZE 32 128 192)
640
+ install(FILES ${SLIC3R_RESOURCES_DIR}/icons/PrusaSlicer_${SIZE}px.png
641
+ DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/icons/hicolor/${SIZE}x${SIZE}/apps RENAME PrusaSlicer.png
642
+ )
643
+ install(FILES ${SLIC3R_RESOURCES_DIR}/icons/PrusaSlicer-gcodeviewer_${SIZE}px.png
644
+ DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/icons/hicolor/${SIZE}x${SIZE}/apps RENAME PrusaSlicer-gcodeviewer.png
645
+ )
646
+ endforeach()
647
+ install(DIRECTORY ${SLIC3R_RESOURCES_DIR}/udev/ DESTINATION lib/udev/rules.d)
648
+ target_compile_definitions(PrusaSlicer PUBLIC SLIC3R_FHS SLIC3R_FHS_RESOURCES="${SLIC3R_FHS_RESOURCES}")
649
+ else ()
650
+ install(FILES src/platform/unix/PrusaSlicer.desktop DESTINATION ${CMAKE_INSTALL_PREFIX}/resources/applications)
651
+ install(FILES src/platform/unix/PrusaGcodeviewer.desktop DESTINATION ${CMAKE_INSTALL_PREFIX}/resources/applications)
652
+ install(DIRECTORY "${SLIC3R_RESOURCES_DIR}/" DESTINATION "${CMAKE_INSTALL_PREFIX}/resources")
653
+ endif ()
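A side note on the option handling in the CMakeLists.txt above (illustration only, not part of the commit): SLIC3R_OPENGL_ES and SLIC3R_DESKTOP_INTEGRATION are declared through CMAKE_DEPENDENT_OPTION, so they stay user-settable only while their controlling condition holds. A minimal standalone sketch of that gating behaviour:

    # Minimal demo script; the project name is arbitrary and no languages are enabled.
    cmake_minimum_required(VERSION 3.13)
    project(dependent_option_demo NONE)
    include(CMakeDependentOption)

    option(SLIC3R_GUI "Compile PrusaSlicer with GUI components" ON)

    # User-settable (default OFF) only while SLIC3R_GUI is ON; otherwise forced to OFF.
    cmake_dependent_option(SLIC3R_OPENGL_ES "Compile PrusaSlicer targeting OpenGL ES" OFF "SLIC3R_GUI" OFF)

    message(STATUS "SLIC3R_GUI=${SLIC3R_GUI} SLIC3R_OPENGL_ES=${SLIC3R_OPENGL_ES}")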
data/CMakePresets.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "version": 3,
+   "configurePresets": [
+     {
+       "name": "default",
+       "displayName": "Default Config",
+       "description": "Building with statically linked dependencies",
+       "binaryDir": "${sourceDir}/build-default",
+       "cacheVariables": {
+         "CMAKE_BUILD_TYPE": "Release",
+         "SLIC3R_STATIC": true,
+         "SLIC3R_GTK": "3",
+         "SLIC3R_ENC_CHECK": false,
+         "SLIC3R_PCH": true,
+         "CMAKE_INSTALL_PREFIX": "${sourceDir}/build-default/dist",
+         "PrusaSlicer_DEPS_PRESET": "default",
+         "PrusaSlicer_DEPS_OUTPUT_QUIET": false
+       }
+     },
+     {
+       "name": "no-occt",
+       "displayName": "Without STEP",
+       "description": "Building with statically linked dependencies without STEP file support",
+       "inherits": "default",
+       "binaryDir": "${sourceDir}/build-no-occt",
+       "cacheVariables": {
+         "SLIC3R_ENABLE_FORMAT_STEP": false,
+         "PrusaSlicer_DEPS_PRESET": "no-occt"
+       }
+     },
+     {
+       "name": "shareddeps",
+       "displayName": "Shared dependencies",
+       "description": "Building with dynamically linked dependencies from the system",
+       "binaryDir": "${sourceDir}/shareddeps",
+       "cacheVariables": {
+         "CMAKE_BUILD_TYPE": "Release",
+         "SLIC3R_STATIC": false,
+         "SLIC3R_GTK": "3",
+         "SLIC3R_ENC_CHECK": false,
+         "SLIC3R_PCH": true,
+         "PrusaSlicer_BUILD_DEPS": false
+       }
+     }
+   ]
+ }
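For orientation (not part of the commit): each preset above is just a named bundle of cache variables plus a binary directory. The same settings as the "default" preset could, for example, be primed from a hypothetical initial-cache script passed to CMake with -C; the preset file remains the canonical source of these values.

    # Hypothetical initial-cache script (file name made up), mirroring part of the
    # "default" preset's cacheVariables block above.
    set(CMAKE_BUILD_TYPE "Release" CACHE STRING "")
    set(SLIC3R_STATIC ON CACHE BOOL "")
    set(SLIC3R_GTK "3" CACHE STRING "")
    set(SLIC3R_ENC_CHECK OFF CACHE BOOL "")
    set(SLIC3R_PCH ON CACHE BOOL "")
    set(PrusaSlicer_DEPS_PRESET "default" CACHE STRING "")
    set(PrusaSlicer_DEPS_OUTPUT_QUIET OFF CACHE BOOL "")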
data/LICENSE ADDED
@@ -0,0 +1,661 @@
1
+ GNU AFFERO GENERAL PUBLIC LICENSE
2
+ Version 3, 19 November 2007
3
+
4
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
5
+ Everyone is permitted to copy and distribute verbatim copies
6
+ of this license document, but changing it is not allowed.
7
+
8
+ Preamble
9
+
10
+ The GNU Affero General Public License is a free, copyleft license for
11
+ software and other kinds of works, specifically designed to ensure
12
+ cooperation with the community in the case of network server software.
13
+
14
+ The licenses for most software and other practical works are designed
15
+ to take away your freedom to share and change the works. By contrast,
16
+ our General Public Licenses are intended to guarantee your freedom to
17
+ share and change all versions of a program--to make sure it remains free
18
+ software for all its users.
19
+
20
+ When we speak of free software, we are referring to freedom, not
21
+ price. Our General Public Licenses are designed to make sure that you
22
+ have the freedom to distribute copies of free software (and charge for
23
+ them if you wish), that you receive source code or can get it if you
24
+ want it, that you can change the software or use pieces of it in new
25
+ free programs, and that you know you can do these things.
26
+
27
+ Developers that use our General Public Licenses protect your rights
28
+ with two steps: (1) assert copyright on the software, and (2) offer
29
+ you this License which gives you legal permission to copy, distribute
30
+ and/or modify the software.
31
+
32
+ A secondary benefit of defending all users' freedom is that
33
+ improvements made in alternate versions of the program, if they
34
+ receive widespread use, become available for other developers to
35
+ incorporate. Many developers of free software are heartened and
36
+ encouraged by the resulting cooperation. However, in the case of
37
+ software used on network servers, this result may fail to come about.
38
+ The GNU General Public License permits making a modified version and
39
+ letting the public access it on a server without ever releasing its
40
+ source code to the public.
41
+
42
+ The GNU Affero General Public License is designed specifically to
43
+ ensure that, in such cases, the modified source code becomes available
44
+ to the community. It requires the operator of a network server to
45
+ provide the source code of the modified version running there to the
46
+ users of that server. Therefore, public use of a modified version, on
47
+ a publicly accessible server, gives the public access to the source
48
+ code of the modified version.
49
+
50
+ An older license, called the Affero General Public License and
51
+ published by Affero, was designed to accomplish similar goals. This is
52
+ a different license, not a version of the Affero GPL, but Affero has
53
+ released a new version of the Affero GPL which permits relicensing under
54
+ this license.
55
+
56
+ The precise terms and conditions for copying, distribution and
57
+ modification follow.
58
+
59
+ TERMS AND CONDITIONS
60
+
61
+ 0. Definitions.
62
+
63
+ "This License" refers to version 3 of the GNU Affero General Public License.
64
+
65
+ "Copyright" also means copyright-like laws that apply to other kinds of
66
+ works, such as semiconductor masks.
67
+
68
+ "The Program" refers to any copyrightable work licensed under this
69
+ License. Each licensee is addressed as "you". "Licensees" and
70
+ "recipients" may be individuals or organizations.
71
+
72
+ To "modify" a work means to copy from or adapt all or part of the work
73
+ in a fashion requiring copyright permission, other than the making of an
74
+ exact copy. The resulting work is called a "modified version" of the
75
+ earlier work or a work "based on" the earlier work.
76
+
77
+ A "covered work" means either the unmodified Program or a work based
78
+ on the Program.
79
+
80
+ To "propagate" a work means to do anything with it that, without
81
+ permission, would make you directly or secondarily liable for
82
+ infringement under applicable copyright law, except executing it on a
83
+ computer or modifying a private copy. Propagation includes copying,
84
+ distribution (with or without modification), making available to the
85
+ public, and in some countries other activities as well.
86
+
87
+ To "convey" a work means any kind of propagation that enables other
88
+ parties to make or receive copies. Mere interaction with a user through
89
+ a computer network, with no transfer of a copy, is not conveying.
90
+
91
+ An interactive user interface displays "Appropriate Legal Notices"
92
+ to the extent that it includes a convenient and prominently visible
93
+ feature that (1) displays an appropriate copyright notice, and (2)
94
+ tells the user that there is no warranty for the work (except to the
95
+ extent that warranties are provided), that licensees may convey the
96
+ work under this License, and how to view a copy of this License. If
97
+ the interface presents a list of user commands or options, such as a
98
+ menu, a prominent item in the list meets this criterion.
99
+
100
+ 1. Source Code.
101
+
102
+ The "source code" for a work means the preferred form of the work
103
+ for making modifications to it. "Object code" means any non-source
104
+ form of a work.
105
+
106
+ A "Standard Interface" means an interface that either is an official
107
+ standard defined by a recognized standards body, or, in the case of
108
+ interfaces specified for a particular programming language, one that
109
+ is widely used among developers working in that language.
110
+
111
+ The "System Libraries" of an executable work include anything, other
112
+ than the work as a whole, that (a) is included in the normal form of
113
+ packaging a Major Component, but which is not part of that Major
114
+ Component, and (b) serves only to enable use of the work with that
115
+ Major Component, or to implement a Standard Interface for which an
116
+ implementation is available to the public in source code form. A
117
+ "Major Component", in this context, means a major essential component
118
+ (kernel, window system, and so on) of the specific operating system
119
+ (if any) on which the executable work runs, or a compiler used to
120
+ produce the work, or an object code interpreter used to run it.
121
+
122
+ The "Corresponding Source" for a work in object code form means all
123
+ the source code needed to generate, install, and (for an executable
124
+ work) run the object code and to modify the work, including scripts to
125
+ control those activities. However, it does not include the work's
126
+ System Libraries, or general-purpose tools or generally available free
127
+ programs which are used unmodified in performing those activities but
128
+ which are not part of the work. For example, Corresponding Source
129
+ includes interface definition files associated with source files for
130
+ the work, and the source code for shared libraries and dynamically
131
+ linked subprograms that the work is specifically designed to require,
132
+ such as by intimate data communication or control flow between those
133
+ subprograms and other parts of the work.
134
+
135
+ The Corresponding Source need not include anything that users
136
+ can regenerate automatically from other parts of the Corresponding
137
+ Source.
138
+
139
+ The Corresponding Source for a work in source code form is that
140
+ same work.
141
+
142
+ 2. Basic Permissions.
143
+
144
+ All rights granted under this License are granted for the term of
145
+ copyright on the Program, and are irrevocable provided the stated
146
+ conditions are met. This License explicitly affirms your unlimited
147
+ permission to run the unmodified Program. The output from running a
148
+ covered work is covered by this License only if the output, given its
149
+ content, constitutes a covered work. This License acknowledges your
150
+ rights of fair use or other equivalent, as provided by copyright law.
151
+
152
+ You may make, run and propagate covered works that you do not
153
+ convey, without conditions so long as your license otherwise remains
154
+ in force. You may convey covered works to others for the sole purpose
155
+ of having them make modifications exclusively for you, or provide you
156
+ with facilities for running those works, provided that you comply with
157
+ the terms of this License in conveying all material for which you do
158
+ not control copyright. Those thus making or running the covered works
159
+ for you must do so exclusively on your behalf, under your direction
160
+ and control, on terms that prohibit them from making any copies of
161
+ your copyrighted material outside their relationship with you.
162
+
163
+ Conveying under any other circumstances is permitted solely under
164
+ the conditions stated below. Sublicensing is not allowed; section 10
165
+ makes it unnecessary.
166
+
167
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
168
+
169
+ No covered work shall be deemed part of an effective technological
170
+ measure under any applicable law fulfilling obligations under article
171
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
172
+ similar laws prohibiting or restricting circumvention of such
173
+ measures.
174
+
175
+ When you convey a covered work, you waive any legal power to forbid
176
+ circumvention of technological measures to the extent such circumvention
177
+ is effected by exercising rights under this License with respect to
178
+ the covered work, and you disclaim any intention to limit operation or
179
+ modification of the work as a means of enforcing, against the work's
180
+ users, your or third parties' legal rights to forbid circumvention of
181
+ technological measures.
182
+
183
+ 4. Conveying Verbatim Copies.
184
+
185
+ You may convey verbatim copies of the Program's source code as you
186
+ receive it, in any medium, provided that you conspicuously and
187
+ appropriately publish on each copy an appropriate copyright notice;
188
+ keep intact all notices stating that this License and any
189
+ non-permissive terms added in accord with section 7 apply to the code;
190
+ keep intact all notices of the absence of any warranty; and give all
191
+ recipients a copy of this License along with the Program.
192
+
193
+ You may charge any price or no price for each copy that you convey,
194
+ and you may offer support or warranty protection for a fee.
195
+
196
+ 5. Conveying Modified Source Versions.
197
+
198
+ You may convey a work based on the Program, or the modifications to
199
+ produce it from the Program, in the form of source code under the
200
+ terms of section 4, provided that you also meet all of these conditions:
201
+
202
+ a) The work must carry prominent notices stating that you modified
203
+ it, and giving a relevant date.
204
+
205
+ b) The work must carry prominent notices stating that it is
206
+ released under this License and any conditions added under section
207
+ 7. This requirement modifies the requirement in section 4 to
208
+ "keep intact all notices".
209
+
210
+ c) You must license the entire work, as a whole, under this
211
+ License to anyone who comes into possession of a copy. This
212
+ License will therefore apply, along with any applicable section 7
213
+ additional terms, to the whole of the work, and all its parts,
214
+ regardless of how they are packaged. This License gives no
215
+ permission to license the work in any other way, but it does not
216
+ invalidate such permission if you have separately received it.
217
+
218
+ d) If the work has interactive user interfaces, each must display
219
+ Appropriate Legal Notices; however, if the Program has interactive
220
+ interfaces that do not display Appropriate Legal Notices, your
221
+ work need not make them do so.
222
+
223
+ A compilation of a covered work with other separate and independent
224
+ works, which are not by their nature extensions of the covered work,
225
+ and which are not combined with it such as to form a larger program,
226
+ in or on a volume of a storage or distribution medium, is called an
227
+ "aggregate" if the compilation and its resulting copyright are not
228
+ used to limit the access or legal rights of the compilation's users
229
+ beyond what the individual works permit. Inclusion of a covered work
230
+ in an aggregate does not cause this License to apply to the other
231
+ parts of the aggregate.
232
+
233
+ 6. Conveying Non-Source Forms.
234
+
235
+ You may convey a covered work in object code form under the terms
236
+ of sections 4 and 5, provided that you also convey the
237
+ machine-readable Corresponding Source under the terms of this License,
238
+ in one of these ways:
239
+
240
+ a) Convey the object code in, or embodied in, a physical product
241
+ (including a physical distribution medium), accompanied by the
242
+ Corresponding Source fixed on a durable physical medium
243
+ customarily used for software interchange.
244
+
245
+ b) Convey the object code in, or embodied in, a physical product
246
+ (including a physical distribution medium), accompanied by a
247
+ written offer, valid for at least three years and valid for as
248
+ long as you offer spare parts or customer support for that product
249
+ model, to give anyone who possesses the object code either (1) a
250
+ copy of the Corresponding Source for all the software in the
251
+ product that is covered by this License, on a durable physical
252
+ medium customarily used for software interchange, for a price no
253
+ more than your reasonable cost of physically performing this
254
+ conveying of source, or (2) access to copy the
255
+ Corresponding Source from a network server at no charge.
256
+
257
+ c) Convey individual copies of the object code with a copy of the
258
+ written offer to provide the Corresponding Source. This
259
+ alternative is allowed only occasionally and noncommercially, and
260
+ only if you received the object code with such an offer, in accord
261
+ with subsection 6b.
262
+
263
+ d) Convey the object code by offering access from a designated
264
+ place (gratis or for a charge), and offer equivalent access to the
265
+ Corresponding Source in the same way through the same place at no
266
+ further charge. You need not require recipients to copy the
267
+ Corresponding Source along with the object code. If the place to
268
+ copy the object code is a network server, the Corresponding Source
269
+ may be on a different server (operated by you or a third party)
270
+ that supports equivalent copying facilities, provided you maintain
271
+ clear directions next to the object code saying where to find the
272
+ Corresponding Source. Regardless of what server hosts the
273
+ Corresponding Source, you remain obligated to ensure that it is
274
+ available for as long as needed to satisfy these requirements.
275
+
276
+ e) Convey the object code using peer-to-peer transmission, provided
277
+ you inform other peers where the object code and Corresponding
278
+ Source of the work are being offered to the general public at no
279
+ charge under subsection 6d.
280
+
281
+ A separable portion of the object code, whose source code is excluded
282
+ from the Corresponding Source as a System Library, need not be
283
+ included in conveying the object code work.
284
+
285
+ A "User Product" is either (1) a "consumer product", which means any
286
+ tangible personal property which is normally used for personal, family,
287
+ or household purposes, or (2) anything designed or sold for incorporation
288
+ into a dwelling. In determining whether a product is a consumer product,
289
+ doubtful cases shall be resolved in favor of coverage. For a particular
290
+ product received by a particular user, "normally used" refers to a
291
+ typical or common use of that class of product, regardless of the status
292
+ of the particular user or of the way in which the particular user
293
+ actually uses, or expects or is expected to use, the product. A product
294
+ is a consumer product regardless of whether the product has substantial
295
+ commercial, industrial or non-consumer uses, unless such uses represent
296
+ the only significant mode of use of the product.
297
+
298
+ "Installation Information" for a User Product means any methods,
299
+ procedures, authorization keys, or other information required to install
300
+ and execute modified versions of a covered work in that User Product from
301
+ a modified version of its Corresponding Source. The information must
302
+ suffice to ensure that the continued functioning of the modified object
303
+ code is in no case prevented or interfered with solely because
304
+ modification has been made.
305
+
306
+ If you convey an object code work under this section in, or with, or
307
+ specifically for use in, a User Product, and the conveying occurs as
308
+ part of a transaction in which the right of possession and use of the
309
+ User Product is transferred to the recipient in perpetuity or for a
310
+ fixed term (regardless of how the transaction is characterized), the
311
+ Corresponding Source conveyed under this section must be accompanied
312
+ by the Installation Information. But this requirement does not apply
313
+ if neither you nor any third party retains the ability to install
314
+ modified object code on the User Product (for example, the work has
315
+ been installed in ROM).
316
+
317
+ The requirement to provide Installation Information does not include a
318
+ requirement to continue to provide support service, warranty, or updates
319
+ for a work that has been modified or installed by the recipient, or for
320
+ the User Product in which it has been modified or installed. Access to a
321
+ network may be denied when the modification itself materially and
322
+ adversely affects the operation of the network or violates the rules and
323
+ protocols for communication across the network.
324
+
325
+ Corresponding Source conveyed, and Installation Information provided,
326
+ in accord with this section must be in a format that is publicly
327
+ documented (and with an implementation available to the public in
328
+ source code form), and must require no special password or key for
329
+ unpacking, reading or copying.
330
+
331
+ 7. Additional Terms.
332
+
333
+ "Additional permissions" are terms that supplement the terms of this
334
+ License by making exceptions from one or more of its conditions.
335
+ Additional permissions that are applicable to the entire Program shall
336
+ be treated as though they were included in this License, to the extent
337
+ that they are valid under applicable law. If additional permissions
338
+ apply only to part of the Program, that part may be used separately
339
+ under those permissions, but the entire Program remains governed by
340
+ this License without regard to the additional permissions.
341
+
342
+ When you convey a copy of a covered work, you may at your option
343
+ remove any additional permissions from that copy, or from any part of
344
+ it. (Additional permissions may be written to require their own
345
+ removal in certain cases when you modify the work.) You may place
346
+ additional permissions on material, added by you to a covered work,
347
+ for which you have or can give appropriate copyright permission.
348
+
349
+ Notwithstanding any other provision of this License, for material you
350
+ add to a covered work, you may (if authorized by the copyright holders of
351
+ that material) supplement the terms of this License with terms:
352
+
353
+ a) Disclaiming warranty or limiting liability differently from the
354
+ terms of sections 15 and 16 of this License; or
355
+
356
+ b) Requiring preservation of specified reasonable legal notices or
357
+ author attributions in that material or in the Appropriate Legal
358
+ Notices displayed by works containing it; or
359
+
360
+ c) Prohibiting misrepresentation of the origin of that material, or
361
+ requiring that modified versions of such material be marked in
362
+ reasonable ways as different from the original version; or
363
+
364
+ d) Limiting the use for publicity purposes of names of licensors or
365
+ authors of the material; or
366
+
367
+ e) Declining to grant rights under trademark law for use of some
368
+ trade names, trademarks, or service marks; or
369
+
370
+ f) Requiring indemnification of licensors and authors of that
371
+ material by anyone who conveys the material (or modified versions of
372
+ it) with contractual assumptions of liability to the recipient, for
373
+ any liability that these contractual assumptions directly impose on
374
+ those licensors and authors.
375
+
376
+ All other non-permissive additional terms are considered "further
377
+ restrictions" within the meaning of section 10. If the Program as you
378
+ received it, or any part of it, contains a notice stating that it is
379
+ governed by this License along with a term that is a further
380
+ restriction, you may remove that term. If a license document contains
381
+ a further restriction but permits relicensing or conveying under this
382
+ License, you may add to a covered work material governed by the terms
383
+ of that license document, provided that the further restriction does
384
+ not survive such relicensing or conveying.
385
+
386
+ If you add terms to a covered work in accord with this section, you
387
+ must place, in the relevant source files, a statement of the
388
+ additional terms that apply to those files, or a notice indicating
389
+ where to find the applicable terms.
390
+
391
+ Additional terms, permissive or non-permissive, may be stated in the
392
+ form of a separately written license, or stated as exceptions;
393
+ the above requirements apply either way.
394
+
395
+ 8. Termination.
396
+
397
+ You may not propagate or modify a covered work except as expressly
398
+ provided under this License. Any attempt otherwise to propagate or
399
+ modify it is void, and will automatically terminate your rights under
400
+ this License (including any patent licenses granted under the third
401
+ paragraph of section 11).
402
+
403
+ However, if you cease all violation of this License, then your
404
+ license from a particular copyright holder is reinstated (a)
405
+ provisionally, unless and until the copyright holder explicitly and
406
+ finally terminates your license, and (b) permanently, if the copyright
407
+ holder fails to notify you of the violation by some reasonable means
408
+ prior to 60 days after the cessation.
409
+
410
+ Moreover, your license from a particular copyright holder is
411
+ reinstated permanently if the copyright holder notifies you of the
412
+ violation by some reasonable means, this is the first time you have
413
+ received notice of violation of this License (for any work) from that
414
+ copyright holder, and you cure the violation prior to 30 days after
415
+ your receipt of the notice.
416
+
417
+ Termination of your rights under this section does not terminate the
418
+ licenses of parties who have received copies or rights from you under
419
+ this License. If your rights have been terminated and not permanently
420
+ reinstated, you do not qualify to receive new licenses for the same
421
+ material under section 10.
422
+
423
+ 9. Acceptance Not Required for Having Copies.
424
+
425
+ You are not required to accept this License in order to receive or
426
+ run a copy of the Program. Ancillary propagation of a covered work
427
+ occurring solely as a consequence of using peer-to-peer transmission
428
+ to receive a copy likewise does not require acceptance. However,
429
+ nothing other than this License grants you permission to propagate or
430
+ modify any covered work. These actions infringe copyright if you do
431
+ not accept this License. Therefore, by modifying or propagating a
432
+ covered work, you indicate your acceptance of this License to do so.
433
+
434
+ 10. Automatic Licensing of Downstream Recipients.
435
+
436
+ Each time you convey a covered work, the recipient automatically
437
+ receives a license from the original licensors, to run, modify and
438
+ propagate that work, subject to this License. You are not responsible
439
+ for enforcing compliance by third parties with this License.
440
+
441
+ An "entity transaction" is a transaction transferring control of an
442
+ organization, or substantially all assets of one, or subdividing an
443
+ organization, or merging organizations. If propagation of a covered
444
+ work results from an entity transaction, each party to that
445
+ transaction who receives a copy of the work also receives whatever
446
+ licenses to the work the party's predecessor in interest had or could
447
+ give under the previous paragraph, plus a right to possession of the
448
+ Corresponding Source of the work from the predecessor in interest, if
449
+ the predecessor has it or can get it with reasonable efforts.
450
+
451
+ You may not impose any further restrictions on the exercise of the
452
+ rights granted or affirmed under this License. For example, you may
453
+ not impose a license fee, royalty, or other charge for exercise of
454
+ rights granted under this License, and you may not initiate litigation
455
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
456
+ any patent claim is infringed by making, using, selling, offering for
457
+ sale, or importing the Program or any portion of it.
458
+
459
+ 11. Patents.
460
+
461
+ A "contributor" is a copyright holder who authorizes use under this
462
+ License of the Program or a work on which the Program is based. The
463
+ work thus licensed is called the contributor's "contributor version".
464
+
465
+ A contributor's "essential patent claims" are all patent claims
466
+ owned or controlled by the contributor, whether already acquired or
467
+ hereafter acquired, that would be infringed by some manner, permitted
468
+ by this License, of making, using, or selling its contributor version,
469
+ but do not include claims that would be infringed only as a
470
+ consequence of further modification of the contributor version. For
471
+ purposes of this definition, "control" includes the right to grant
472
+ patent sublicenses in a manner consistent with the requirements of
473
+ this License.
474
+
475
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
476
+ patent license under the contributor's essential patent claims, to
477
+ make, use, sell, offer for sale, import and otherwise run, modify and
478
+ propagate the contents of its contributor version.
479
+
480
+ In the following three paragraphs, a "patent license" is any express
481
+ agreement or commitment, however denominated, not to enforce a patent
482
+ (such as an express permission to practice a patent or covenant not to
483
+ sue for patent infringement). To "grant" such a patent license to a
484
+ party means to make such an agreement or commitment not to enforce a
485
+ patent against the party.
486
+
487
+ If you convey a covered work, knowingly relying on a patent license,
488
+ and the Corresponding Source of the work is not available for anyone
489
+ to copy, free of charge and under the terms of this License, through a
490
+ publicly available network server or other readily accessible means,
491
+ then you must either (1) cause the Corresponding Source to be so
492
+ available, or (2) arrange to deprive yourself of the benefit of the
493
+ patent license for this particular work, or (3) arrange, in a manner
494
+ consistent with the requirements of this License, to extend the patent
495
+ license to downstream recipients. "Knowingly relying" means you have
496
+ actual knowledge that, but for the patent license, your conveying the
497
+ covered work in a country, or your recipient's use of the covered work
498
+ in a country, would infringe one or more identifiable patents in that
499
+ country that you have reason to believe are valid.
500
+
501
+ If, pursuant to or in connection with a single transaction or
502
+ arrangement, you convey, or propagate by procuring conveyance of, a
503
+ covered work, and grant a patent license to some of the parties
504
+ receiving the covered work authorizing them to use, propagate, modify
505
+ or convey a specific copy of the covered work, then the patent license
506
+ you grant is automatically extended to all recipients of the covered
507
+ work and works based on it.
508
+
509
+ A patent license is "discriminatory" if it does not include within
510
+ the scope of its coverage, prohibits the exercise of, or is
511
+ conditioned on the non-exercise of one or more of the rights that are
512
+ specifically granted under this License. You may not convey a covered
513
+ work if you are a party to an arrangement with a third party that is
514
+ in the business of distributing software, under which you make payment
515
+ to the third party based on the extent of your activity of conveying
516
+ the work, and under which the third party grants, to any of the
517
+ parties who would receive the covered work from you, a discriminatory
518
+ patent license (a) in connection with copies of the covered work
519
+ conveyed by you (or copies made from those copies), or (b) primarily
520
+ for and in connection with specific products or compilations that
521
+ contain the covered work, unless you entered into that arrangement,
522
+ or that patent license was granted, prior to 28 March 2007.
523
+
524
+ Nothing in this License shall be construed as excluding or limiting
525
+ any implied license or other defenses to infringement that may
526
+ otherwise be available to you under applicable patent law.
527
+
528
+ 12. No Surrender of Others' Freedom.
529
+
530
+ If conditions are imposed on you (whether by court order, agreement or
531
+ otherwise) that contradict the conditions of this License, they do not
532
+ excuse you from the conditions of this License. If you cannot convey a
533
+ covered work so as to satisfy simultaneously your obligations under this
534
+ License and any other pertinent obligations, then as a consequence you may
535
+ not convey it at all. For example, if you agree to terms that obligate you
536
+ to collect a royalty for further conveying from those to whom you convey
537
+ the Program, the only way you could satisfy both those terms and this
538
+ License would be to refrain entirely from conveying the Program.
539
+
540
+ 13. Remote Network Interaction; Use with the GNU General Public License.
541
+
542
+ Notwithstanding any other provision of this License, if you modify the
543
+ Program, your modified version must prominently offer all users
544
+ interacting with it remotely through a computer network (if your version
545
+ supports such interaction) an opportunity to receive the Corresponding
546
+ Source of your version by providing access to the Corresponding Source
547
+ from a network server at no charge, through some standard or customary
548
+ means of facilitating copying of software. This Corresponding Source
549
+ shall include the Corresponding Source for any work covered by version 3
550
+ of the GNU General Public License that is incorporated pursuant to the
551
+ following paragraph.
552
+
553
+ Notwithstanding any other provision of this License, you have
554
+ permission to link or combine any covered work with a work licensed
555
+ under version 3 of the GNU General Public License into a single
556
+ combined work, and to convey the resulting work. The terms of this
557
+ License will continue to apply to the part which is the covered work,
558
+ but the work with which it is combined will remain governed by version
559
+ 3 of the GNU General Public License.
560
+
561
+ 14. Revised Versions of this License.
562
+
563
+ The Free Software Foundation may publish revised and/or new versions of
564
+ the GNU Affero General Public License from time to time. Such new versions
565
+ will be similar in spirit to the present version, but may differ in detail to
566
+ address new problems or concerns.
567
+
568
+ Each version is given a distinguishing version number. If the
569
+ Program specifies that a certain numbered version of the GNU Affero General
570
+ Public License "or any later version" applies to it, you have the
571
+ option of following the terms and conditions either of that numbered
572
+ version or of any later version published by the Free Software
573
+ Foundation. If the Program does not specify a version number of the
574
+ GNU Affero General Public License, you may choose any version ever published
575
+ by the Free Software Foundation.
576
+
577
+ If the Program specifies that a proxy can decide which future
578
+ versions of the GNU Affero General Public License can be used, that proxy's
579
+ public statement of acceptance of a version permanently authorizes you
580
+ to choose that version for the Program.
581
+
582
+ Later license versions may give you additional or different
583
+ permissions. However, no additional obligations are imposed on any
584
+ author or copyright holder as a result of your choosing to follow a
585
+ later version.
586
+
587
+ 15. Disclaimer of Warranty.
588
+
589
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
590
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
591
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
592
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
593
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
594
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
595
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
596
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
597
+
598
+ 16. Limitation of Liability.
599
+
600
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
601
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
602
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
603
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
604
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
605
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
606
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
607
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
608
+ SUCH DAMAGES.
609
+
610
+ 17. Interpretation of Sections 15 and 16.
611
+
612
+ If the disclaimer of warranty and limitation of liability provided
613
+ above cannot be given local legal effect according to their terms,
614
+ reviewing courts shall apply local law that most closely approximates
615
+ an absolute waiver of all civil liability in connection with the
616
+ Program, unless a warranty or assumption of liability accompanies a
617
+ copy of the Program in return for a fee.
618
+
619
+ END OF TERMS AND CONDITIONS
620
+
621
+ How to Apply These Terms to Your New Programs
622
+
623
+ If you develop a new program, and you want it to be of the greatest
624
+ possible use to the public, the best way to achieve this is to make it
625
+ free software which everyone can redistribute and change under these terms.
626
+
627
+ To do so, attach the following notices to the program. It is safest
628
+ to attach them to the start of each source file to most effectively
629
+ state the exclusion of warranty; and each file should have at least
630
+ the "copyright" line and a pointer to where the full notice is found.
631
+
632
+ <one line to give the program's name and a brief idea of what it does.>
633
+ Copyright (C) <year> <name of author>
634
+
635
+ This program is free software: you can redistribute it and/or modify
636
+ it under the terms of the GNU Affero General Public License as published by
637
+ the Free Software Foundation, either version 3 of the License, or
638
+ (at your option) any later version.
639
+
640
+ This program is distributed in the hope that it will be useful,
641
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
642
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
643
+ GNU Affero General Public License for more details.
644
+
645
+ You should have received a copy of the GNU Affero General Public License
646
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
647
+
648
+ Also add information on how to contact you by electronic and paper mail.
649
+
650
+ If your software can interact with users remotely through a computer
651
+ network, you should also make sure that it provides a way for users to
652
+ get its source. For example, if your program is a web application, its
653
+ interface could display a "Source" link that leads users to an archive
654
+ of the code. There are many ways you could offer source, and different
655
+ solutions will be better for different programs; see section 13 for the
656
+ specific requirements.
657
+
658
+ You should also get your employer (if you work as a programmer) or school,
659
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
660
+ For more information on this, and how to apply and follow the GNU AGPL, see
661
+ <http://www.gnu.org/licenses/>.
data/build-utils/CMakeLists.txt ADDED
@@ -0,0 +1,50 @@
1
+
2
+ option(SLIC3R_ENC_CHECK "Verify encoding of source files" 1)
3
+
4
+ if (IS_CROSS_COMPILE)
5
+ # Force-disable when cross-compiling. This is already reported to the user on the CLI.
6
+ set(SLIC3R_ENC_CHECK OFF CACHE BOOL "" FORCE)
7
+ endif ()
8
+
9
+ if (SLIC3R_ENC_CHECK)
10
+ add_executable(encoding-check encoding-check.cpp)
11
+
12
+ # A global no-op target which depends on all encoding checks,
13
+ # and on which in turn all checked targets depend.
14
+ # This is done to make encoding checks the first thing to be
15
+ # performed before actually compiling any sources of the checked targets
16
+ # so that the check fails as early as possible.
17
+ add_custom_target(global-encoding-check
18
+ ALL
19
+ DEPENDS encoding-check
20
+ )
21
+ endif()
22
+
23
+ # Function that adds a source file encoding check to a target
24
+ # using the above encoding-check binary
25
+
26
+ function(encoding_check TARGET)
27
+ if (SLIC3R_ENC_CHECK)
28
+ # Obtain target source files
29
+ get_target_property(T_SOURCES ${TARGET} SOURCES)
30
+
31
+ # Define top-level encoding check target for this ${TARGET}
32
+ add_custom_target(encoding-check-${TARGET}
33
+ DEPENDS encoding-check ${T_SOURCES}
34
+ COMMENT "Checking source files encodings for target ${TARGET}"
35
+ )
36
+
37
+ # Add checking of each source file as a subcommand of encoding-check-${TARGET}
38
+ foreach(file ${T_SOURCES})
39
+ add_custom_command(TARGET encoding-check-${TARGET}
40
+ COMMAND $<TARGET_FILE:encoding-check> ${TARGET} ${file}
41
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
42
+ )
43
+ endforeach()
44
+
45
+ # This adds a dependency on encoding-check-${TARGET} to ${TARGET}
46
+ # via the global-encoding-check
47
+ add_dependencies(global-encoding-check encoding-check-${TARGET})
48
+ add_dependencies(${TARGET} global-encoding-check)
49
+ endif()
50
+ endfunction()
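A target opts into the check simply by calling the function above after the target has been defined. A minimal sketch of a consumer (hypothetical target and source names):

    add_library(example_lib STATIC example.cpp)
    encoding_check(example_lib)

With SLIC3R_ENC_CHECK enabled, example_lib then depends on global-encoding-check, so the encoding-check binary runs over example.cpp and the build fails early if the file is not valid, BOM-free UTF-8.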
data/build-utils/encoding-check.cpp ADDED
@@ -0,0 +1,119 @@
1
+ #include <vector>
2
+ #include <iostream>
3
+ #include <fstream>
4
+ #include <cstdlib>
5
+
6
+
7
+ /*
8
+ * The utf8_check() function scans the '\0'-terminated string starting
9
+ * at s. It returns a pointer to the first byte of the first malformed
10
+ * or overlong UTF-8 sequence found, or NULL if the string contains
11
+ * only correct UTF-8. It also spots UTF-8 sequences that could cause
12
+ * trouble if converted to UTF-16, namely surrogate characters
13
+ * (U+D800..U+DFFF) and non-Unicode positions (U+FFFE..U+FFFF). This
14
+ * routine is very likely to find a malformed sequence if the input
15
+ * uses any other encoding than UTF-8. It therefore can be used as a
16
+ * very effective heuristic for distinguishing between UTF-8 and other
17
+ * encodings.
18
+ *
19
+ * I wrote this code mainly as a specification of functionality; there
20
+ * are no doubt performance optimizations possible for certain CPUs.
21
+ *
22
+ * Markus Kuhn <http://www.cl.cam.ac.uk/~mgk25/> -- 2005-03-30
23
+ * License: http://www.cl.cam.ac.uk/~mgk25/short-license.html
24
+ */
25
+
26
+ unsigned char *utf8_check(unsigned char *s)
27
+ {
28
+ while (*s) {
29
+ if (*s < 0x80) {
30
+ // 0xxxxxxx
31
+ s++;
32
+ } else if ((s[0] & 0xe0) == 0xc0) {
33
+ // 110xxxxx 10xxxxxx
34
+ if ((s[1] & 0xc0) != 0x80 ||
35
+ (s[0] & 0xfe) == 0xc0) { // overlong?
36
+ return s;
37
+ } else {
38
+ s += 2;
39
+ }
40
+ } else if ((s[0] & 0xf0) == 0xe0) {
41
+ // 1110xxxx 10xxxxxx 10xxxxxx
42
+ if ((s[1] & 0xc0) != 0x80 ||
43
+ (s[2] & 0xc0) != 0x80 ||
44
+ (s[0] == 0xe0 && (s[1] & 0xe0) == 0x80) || // overlong?
45
+ (s[0] == 0xed && (s[1] & 0xe0) == 0xa0) || // surrogate?
46
+ (s[0] == 0xef && s[1] == 0xbf &&
47
+ (s[2] & 0xfe) == 0xbe)) { // U+FFFE or U+FFFF?
48
+ return s;
49
+ } else {
50
+ s += 3;
51
+ }
52
+ } else if ((s[0] & 0xf8) == 0xf0) {
53
+ // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
54
+ if ((s[1] & 0xc0) != 0x80 ||
55
+ (s[2] & 0xc0) != 0x80 ||
56
+ (s[3] & 0xc0) != 0x80 ||
57
+ (s[0] == 0xf0 && (s[1] & 0xf0) == 0x80) || // overlong?
58
+ (s[0] == 0xf4 && s[1] > 0x8f) || s[0] > 0xf4) { // > U+10FFFF?
59
+ return s;
60
+ } else {
61
+ s += 4;
62
+ }
63
+ } else {
64
+ return s;
65
+ }
66
+ }
67
+
68
+ return NULL;
69
+ }
70
+
71
+
72
+ int main(int argc, char const *argv[])
73
+ {
74
+ if (argc != 3) {
75
+ std::cerr << "Usage: " << argv[0] << " <program/library> <file>" << std::endl;
76
+ return -1;
77
+ }
78
+
79
+ const char* target = argv[1];
80
+ const char* filename = argv[2];
81
+
82
+ const auto error_exit = [=](const char* error) {
83
+ std::cerr << "\n\tError: " << error << ": " << filename << "\n"
84
+ << "\tTarget: " << target << "\n"
85
+ << std::endl;
86
+ std::exit(-2);
87
+ };
88
+
89
+ std::ifstream file(filename, std::ios::binary | std::ios::ate);
90
+ const auto size = file.tellg();
91
+
92
+ if (size == 0) {
93
+ return 0;
94
+ }
95
+
96
+ file.seekg(0, std::ios::beg);
97
+ std::vector<char> buffer(size);
98
+
99
+ if (file.read(buffer.data(), size)) {
100
+ buffer.push_back('\0');
101
+
102
+ // Check UTF-8 validity
103
+ if (utf8_check(reinterpret_cast<unsigned char*>(buffer.data())) != nullptr) {
104
+ error_exit("Source file does not contain (valid) UTF-8");
105
+ }
106
+
107
+ // Check against a BOM mark
108
+ if (buffer.size() >= 3
109
+ && buffer[0] == '\xef'
110
+ && buffer[1] == '\xbb'
111
+ && buffer[2] == '\xbf') {
112
+ error_exit("Source file is valid UTF-8 but contains a BOM mark");
113
+ }
114
+ } else {
115
+ error_exit("Could not read source file");
116
+ }
117
+
118
+ return 0;
119
+ }
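To make the checker's contract concrete: utf8_check() returns NULL for well-formed UTF-8 and a pointer to the first offending byte otherwise. A small sketch of a hypothetical test file, linked against the utf8_check() definition above (built without that file's own main(), e.g. with utf8_check compiled into its own translation unit):

    #include <cassert>
    #include <cstddef>

    unsigned char *utf8_check(unsigned char *s); // defined in encoding-check.cpp

    int main()
    {
        // "café" in UTF-8: 63 61 66 C3 A9
        unsigned char valid[]     = { 0x63, 0x61, 0x66, 0xC3, 0xA9, 0x00 };
        // 0xC3 opens a two-byte sequence, but 0x28 is not a continuation byte
        unsigned char truncated[] = { 0xC3, 0x28, 0x00 };
        // 0xC0 0xAF is an overlong encoding of '/', which the checker rejects
        unsigned char overlong[]  = { 0xC0, 0xAF, 0x00 };

        assert(utf8_check(valid) == NULL);
        assert(utf8_check(truncated) == truncated);
        assert(utf8_check(overlong) == overlong);
        return 0;
    }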
data/build_win.bat ADDED
@@ -0,0 +1,515 @@
1
+ @REM /|/ Copyright (c) 2022 Jebtrix @Jebtrix
2
+ @REM /|/ Copyright (c) 2021 Justin Schuh @jschuh
3
+ @REM /|/
4
+ @REM /|/ PrusaSlicer is released under the terms of the AGPLv3 or higher
5
+ @REM /|/
6
+ @setlocal disableDelayedExpansion enableExtensions
7
+ @IF "%PS_ECHO_ON%" NEQ "" (echo on) ELSE (echo off)
8
+ @GOTO :MAIN
9
+ :HELP
10
+ @ECHO.
11
+ @ECHO Performs initial build or rebuild of the app (build) and deps (deps/build).
12
+ @ECHO Default options are determined from build directories and system state.
13
+ @ECHO.
14
+ @ECHO Usage: build_win [-ARCH ^<arch^>] [-CONFIG ^<config^>] [-VERSION ^<version^>]
15
+ @ECHO [-PRODUCT ^<product^>] [-DESTDIR ^<directory^>]
16
+ @ECHO [-STEPS ^<all^|all-dirty^|app^|app-dirty^|deps^|deps-dirty^>]
17
+ @ECHO [-RUN ^<console^|custom^|none^|viewer^|window^>]
18
+ @ECHO [-PRIORITY ^<normal^|low^>]
19
+ @ECHO.
20
+ @ECHO -a -ARCH Target processor architecture
21
+ @ECHO Default: %PS_ARCH_HOST%
22
+ @ECHO -c -CONFIG MSVC project config
23
+ @ECHO Default: %PS_CONFIG_DEFAULT%
24
+ @ECHO -v -VERSION Major version number of MSVC installation to use for build
25
+ @ECHO Default: %PS_VERSION_SUPPORTED%
26
+ @ECHO -p -PRODUCT Product ID of MSVC installation to use for build
27
+ @ECHO Default: %PS_PRODUCT_DEFAULT%
28
+ @ECHO -s -STEPS Performs only the specified build steps:
29
+ @ECHO all - clean and build deps and app
30
+ @ECHO all-dirty - build deps and app without cleaning
31
+ @ECHO app - clean and build main applications
32
+ @ECHO app-dirty - build main applications without cleaning
33
+ @ECHO deps - clean and build deps
34
+ @ECHO deps-dirty - build deps without cleaning
35
+ @ECHO Default: %PS_STEPS_DEFAULT%
36
+ @ECHO -r -RUN Specifies what to perform at the run step:
37
+ @ECHO console - run and wait on prusa-slicer-console.exe
38
+ @ECHO custom - run and wait on your custom build/%PS_CUSTOM_RUN_FILE%
39
+ @ECHO ide - open project in Visual Studio if not open (no wait)
40
+ @ECHO none - run step does nothing
41
+ @ECHO viewer - run prusa-gcodeviewer.exe (no wait)
42
+ @ECHO window - run prusa-slicer.exe (no wait)
43
+ @ECHO Default: none
44
+ @ECHO -d -DESTDIR Deps destination directory
45
+ @ECHO Warning: Changing destdir path will not delete the old destdir.
46
+ @ECHO Default: %PS_DESTDIR_DEFAULT_MSG%
47
+ @ECHO -p -PRIORITY Build CPU priority
48
+ @ECHO Default: normal
49
+ @ECHO.
50
+ @ECHO Examples:
51
+ @ECHO.
52
+ @ECHO Initial build: build_win -d "c:\src\PrusaSlicer-deps"
53
+ @ECHO Build post deps change: build_win -s all
54
+ @ECHO App dirty build: build_win
55
+ @ECHO App dirty build ^& run: build_win -r console
56
+ @ECHO All clean build ^& run: build_win -s all -r console -d "deps\build\out_deps"
57
+ @ECHO.
58
+ GOTO :END
59
+
60
+ :MAIN
61
+ REM Script constants
62
+ SET START_TIME=%TIME%
63
+ SET PS_START_DIR=%CD%
64
+ SET PS_SOLUTION_NAME=PrusaSlicer
65
+ SET PS_CHOICE_TIMEOUT=30
66
+ SET PS_CUSTOM_RUN_FILE=custom_run.bat
67
+ SET PS_DEPS_PATH_FILE_NAME=.DEPS_PATH.txt
68
+ SET PS_DEPS_PATH_FILE=%~dp0deps\build\%PS_DEPS_PATH_FILE_NAME%
69
+ SET PS_CONFIG_LIST="Debug;MinSizeRel;Release;RelWithDebInfo"
70
+
71
+ REM Update this script for new versions by setting PS_VERSION_SUPPORTED to a
72
+ REM new minimum version and setting PS_VERSION_EXCEEDED to the maximum supported
73
+ REM version plus one.
74
+ REM The officially supported toolchain versions are:
75
+ REM Minimum: 16 (Visual Studio 2019)
76
+ REM Maximum: 17 (Visual Studio 2022)
77
+ SET PS_VERSION_SUPPORTED=16
78
+ SET PS_VERSION_EXCEEDED=18
79
+ SET VSWHERE=%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe
80
+ IF NOT EXIST "%VSWHERE%" SET VSWHERE=%ProgramFiles%\Microsoft Visual Studio\Installer\vswhere.exe
81
+ FOR /F "tokens=4 USEBACKQ delims=." %%I IN (`"%VSWHERE%" -nologo -property productId`) DO SET PS_PRODUCT_DEFAULT=%%I
82
+ IF "%PS_PRODUCT_DEFAULT%" EQU "" (
83
+ SET EXIT_STATUS=-1
84
+ @ECHO ERROR: No Visual Studio installation found. 1>&2
85
+ GOTO :HELP
86
+ )
87
+ REM Default to the latest supported version if multiple are available
88
+ FOR /F "tokens=1 USEBACKQ delims=." %%I IN (
89
+ `^""%VSWHERE%" -version "[%PS_VERSION_SUPPORTED%,%PS_VERSION_EXCEEDED%)" -latest -nologo -property catalog_buildVersion^"`
90
+ ) DO SET PS_VERSION_SUPPORTED=%%I
91
+
92
+ REM Probe build directories and system state for reasonable default arguments
93
+ pushd %~dp0
94
+ SET PS_CONFIG=RelWithDebInfo
95
+ SET PS_ARCH=%PROCESSOR_ARCHITECTURE:amd64=x64%
96
+ CALL :TOLOWER PS_ARCH
97
+ SET PS_RUN=none
98
+ SET PS_DESTDIR=
99
+ SET PS_VERSION=
100
+ SET PS_PRODUCT=%PS_PRODUCT_DEFAULT%
101
+ SET PS_PRIORITY=normal
102
+ CALL :RESOLVE_DESTDIR_CACHE
103
+
104
+ REM Set up parameters used by help menu
105
+ SET EXIT_STATUS=0
106
+ SET PS_CONFIG_DEFAULT=%PS_CONFIG%
107
+ SET PS_ARCH_HOST=%PS_ARCH%
108
+ (echo " -help /help -h /h -? /? ")| findstr /I /C:" %~1 ">nul && GOTO :HELP
109
+
110
+ REM Parse arguments
111
+ SET EXIT_STATUS=1
112
+ SET PS_CURRENT_STEP=arguments
113
+ SET PARSER_STATE=
114
+ SET PARSER_FAIL=
115
+ FOR %%I in (%*) DO CALL :PARSE_OPTION "ARCH CONFIG DESTDIR STEPS RUN VERSION PRODUCT PRIORITY" PARSER_STATE "%%~I"
116
+ IF "%PARSER_FAIL%" NEQ "" (
117
+ @ECHO ERROR: Invalid switch: %PARSER_FAIL% 1>&2
118
+ GOTO :HELP
119
+ )ELSE IF "%PARSER_STATE%" NEQ "" (
120
+ @ECHO ERROR: Missing parameter for: %PARSER_STATE% 1>&2
121
+ GOTO :HELP
122
+ )
123
+
124
+ REM Validate arguments
125
+ SET PS_ASK_TO_CONTINUE=
126
+ CALL :TOLOWER PS_ARCH
127
+ SET PS_ARCH=%PS_ARCH:amd64=x64%
128
+ CALL :PARSE_OPTION_VALUE %PS_CONFIG_LIST:;= % PS_CONFIG
129
+ IF "%PS_CONFIG%" EQU "" GOTO :HELP
130
+ CALL :PARSE_OPTION_VALUE "normal low" PS_PRIORITY
131
+ SET PS_PRIORITY=%PS_PRIORITY:normal= %
132
+ SET PS_PRIORITY=%PS_PRIORITY:low=-low%
133
+ REM RESOLVE_DESTDIR_CACHE must go after PS_ARCH and PS_CONFIG, but before PS_STEPS
134
+ CALL :RESOLVE_DESTDIR_CACHE
135
+ IF "%PS_STEPS%" EQU "" SET PS_STEPS=%PS_STEPS_DEFAULT%
136
+ CALL :PARSE_OPTION_VALUE "all all-dirty deps-dirty deps app-dirty app app-cmake" PS_STEPS
137
+ IF "%PS_STEPS%" EQU "" GOTO :HELP
138
+ (echo %PS_STEPS%)| findstr /I /C:"dirty">nul && SET PS_STEPS_DIRTY=1 || SET PS_STEPS_DIRTY=
139
+ IF "%PS_STEPS%" EQU "app-cmake" SET PS_STEPS_DIRTY=1
140
+ IF "%PS_DESTDIR%" EQU "" SET PS_DESTDIR=%PS_DESTDIR_CACHED%
141
+ IF "%PS_DESTDIR%" EQU "" (
142
+ @ECHO ERROR: Parameter required: -DESTDIR 1>&2
143
+ GOTO :HELP
144
+ )
145
+ CALL :CANONICALIZE_PATH PS_DESTDIR "%PS_START_DIR%"
146
+ IF "%PS_DESTDIR%" NEQ "%PS_DESTDIR_CACHED%" (
147
+ (echo "all deps all-dirty deps-dirty")| findstr /I /C:"%PS_STEPS%">nul || (
148
+ IF EXIST "%PS_DESTDIR%" (
149
+ @ECHO WARNING: DESTDIR does not match cache: 1>&2
150
+ @ECHO WARNING: new: %PS_DESTDIR% 1>&2
151
+ @ECHO WARNING: old: %PS_DESTDIR_CACHED% 1>&2
152
+ SET PS_ASK_TO_CONTINUE=1
153
+ ) ELSE (
154
+ @ECHO ERROR: Invalid parameter: DESTDIR=%PS_DESTDIR% 1>&2
155
+ GOTO :HELP
156
+ )
157
+ )
158
+ )
159
+ SET PS_DESTDIR_DEFAULT_MSG=
160
+ CALL :PARSE_OPTION_VALUE "console custom ide none viewer window" PS_RUN
161
+ IF "%PS_RUN%" EQU "" GOTO :HELP
162
+ IF "%PS_RUN%" NEQ "none" IF "%PS_STEPS:~0,4%" EQU "deps" (
163
+ @ECHO ERROR: RUN=%PS_RUN% specified with STEPS=%PS_STEPS%
164
+ @ECHO ERROR: RUN=none is the only valid option for STEPS "deps" or "deps-dirty"
165
+ GOTO :HELP
166
+ )
167
+ IF DEFINED PS_VERSION (
168
+ SET /A PS_VERSION_EXCEEDED=%PS_VERSION% + 1
169
+ ) ELSE SET PS_VERSION=%PS_VERSION_SUPPORTED%
170
+ SET MSVC_FILTER=-products Microsoft.VisualStudio.Product.%PS_PRODUCT% -version "[%PS_VERSION%,%PS_VERSION_EXCEEDED%)"
171
+ FOR /F "tokens=* USEBACKQ" %%I IN (`^""%VSWHERE%" %MSVC_FILTER% -nologo -property installationPath^"`) DO SET MSVC_DIR=%%I
172
+ IF NOT EXIST "%MSVC_DIR%" (
173
+ @ECHO ERROR: Compatible Visual Studio installation not found. 1>&2
174
+ GOTO :HELP
175
+ )
176
+ REM Cmake always defaults to latest supported MSVC generator. Let's make sure it uses what we select.
177
+ FOR /F "tokens=* USEBACKQ" %%I IN (`^""%VSWHERE%" %MSVC_FILTER% -nologo -property catalog_productLineVersion^"`) DO SET PS_PRODUCT_VERSION=%%I
178
+
179
+ REM Give the user a chance to cancel if we found something odd.
180
+ IF "%PS_ASK_TO_CONTINUE%" EQU "" GOTO :BUILD_ENV
181
+ @ECHO.
182
+ @ECHO Unexpected parameters detected. Build paused for %PS_CHOICE_TIMEOUT% seconds.
183
+ choice /T %PS_CHOICE_TIMEOUT% /C YN /D N /M "Continue"
184
+ IF %ERRORLEVEL% NEQ 1 GOTO :HELP
185
+
186
+ REM Set up MSVC environment
187
+ :BUILD_ENV
188
+ SET EXIT_STATUS=2
189
+ SET PS_CURRENT_STEP=environment
190
+ @ECHO **********************************************************************
191
+ @ECHO ** Build Config: %PS_CONFIG%
192
+ @ECHO ** Target Arch: %PS_ARCH%
193
+ @ECHO ** Build Steps: %PS_STEPS%
194
+ @ECHO ** Run App: %PS_RUN%
195
+ @ECHO ** Deps path: %PS_DESTDIR%
196
+ @ECHO ** Using Microsoft Visual Studio installation found at:
197
+ @ECHO ** %MSVC_DIR%
198
+ SET CMAKE_GENERATOR=Visual Studio %PS_VERSION% %PS_PRODUCT_VERSION%
199
+ CALL "%MSVC_DIR%\Common7\Tools\vsdevcmd.bat" -arch=%PS_ARCH% -host_arch=%PS_ARCH_HOST% -app_platform=Desktop
200
+ IF %ERRORLEVEL% NEQ 0 GOTO :END
201
+ REM Need to reset the echo state after vsdevcmd.bat clobbers it.
202
+ @IF "%PS_ECHO_ON%" NEQ "" (echo on) ELSE (echo off)
203
+ IF "%PS_DRY_RUN_ONLY%" NEQ "" (
204
+ @ECHO Script terminated early because PS_DRY_RUN_ONLY is set. 1>&2
205
+ GOTO :END
206
+ )
207
+ IF /I "%PS_STEPS:~0,3%" EQU "app" GOTO :BUILD_APP
208
+
209
+ REM Build deps
210
+ :BUILD_DEPS
211
+ SET EXIT_STATUS=3
212
+ SET PS_CURRENT_STEP=deps
213
+ IF "%PS_STEPS_DIRTY%" EQU "" (
214
+ CALL :MAKE_OR_CLEAN_DIRECTORY deps\build "%PS_DEPS_PATH_FILE_NAME%" .vs
215
+ CALL :MAKE_OR_CLEAN_DIRECTORY "%PS_DESTDIR%"
216
+ )
217
+ cd deps\build || GOTO :END
218
+ cmake.exe .. -DDESTDIR="%PS_DESTDIR%"
219
+ IF %ERRORLEVEL% NEQ 0 IF "%PS_STEPS_DIRTY%" NEQ "" (
220
+ (del CMakeCache.txt && cmake.exe .. -DDESTDIR="%PS_DESTDIR%") || GOTO :END
221
+ ) ELSE GOTO :END
222
+ (echo %PS_DESTDIR%)> "%PS_DEPS_PATH_FILE%"
223
+ msbuild /m ALL_BUILD.vcxproj /p:Configuration=%PS_CONFIG% /v:quiet %PS_PRIORITY% || GOTO :END
224
+ cd ..\..
225
+ IF /I "%PS_STEPS:~0,4%" EQU "deps" GOTO :RUN_APP
226
+
227
+ REM Build app
228
+ :BUILD_APP
229
+ SET EXIT_STATUS=4
230
+ SET PS_CURRENT_STEP=app
231
+ IF "%PS_STEPS_DIRTY%" EQU "" CALL :MAKE_OR_CLEAN_DIRECTORY build "%PS_CUSTOM_RUN_FILE%" .vs
232
+ cd build || GOTO :END
233
+ REM Make sure we have a custom batch file skeleton for the run stage
234
+ set PS_CUSTOM_BAT=%PS_CUSTOM_RUN_FILE%
235
+ CALL :CANONICALIZE_PATH PS_CUSTOM_BAT
236
+ IF NOT EXIST %PS_CUSTOM_BAT% CALL :WRITE_CUSTOM_SCRIPT_SKELETON %PS_CUSTOM_BAT%
237
+ SET PS_PROJECT_IS_OPEN=
238
+ FOR /F "tokens=2 delims=," %%I in (
239
+ 'tasklist /V /FI "IMAGENAME eq devenv.exe " /NH /FO CSV ^| find "%PS_SOLUTION_NAME%"'
240
+ ) do SET PS_PROJECT_IS_OPEN=%%~I
241
+ cmake.exe .. -DCMAKE_PREFIX_PATH="%PS_DESTDIR%\usr\local" -DCMAKE_CONFIGURATION_TYPES=%PS_CONFIG_LIST%
242
+ IF %ERRORLEVEL% NEQ 0 IF "%PS_STEPS_DIRTY%" NEQ "" (
243
+ (del CMakeCache.txt && cmake.exe .. -DCMAKE_PREFIX_PATH="%PS_DESTDIR%\usr\local" -DCMAKE_CONFIGURATION_TYPES=%PS_CONFIG_LIST%) || GOTO :END
244
+ ) ELSE GOTO :END
245
+ REM Skip the build step if we're using the undocumented app-cmake to regenerate the full config from inside devenv
246
+ IF "%PS_STEPS%" NEQ "app-cmake" msbuild /m ALL_BUILD.vcxproj /p:Configuration=%PS_CONFIG% /v:quiet %PS_PRIORITY% || GOTO :END
247
+ (echo %PS_DESTDIR%)> "%PS_DEPS_PATH_FILE_FOR_CONFIG%"
248
+
249
+ REM Run app
250
+ :RUN_APP
251
+ REM All build steps complete.
252
+ CALL :DIFF_TIME ELAPSED_TIME %START_TIME% %TIME%
253
+ IF "%PS_CURRENT_STEP%" NEQ "arguments" (
254
+ @ECHO.
255
+ @ECHO Total Build Time Elapsed %ELAPSED_TIME%
256
+ )
257
+ SET EXIT_STATUS=5
258
+ SET PS_CURRENT_STEP=run
259
+ IF "%PS_RUN%" EQU "none" GOTO :PROLOGUE
260
+ cd src\%PS_CONFIG% || GOTO :END
261
+ SET PS_PROJECT_IS_OPEN=
262
+ FOR /F "tokens=2 delims=," %%I in (
263
+ 'tasklist /V /FI "IMAGENAME eq devenv.exe " /NH /FO CSV ^| find "%PS_SOLUTION_NAME%"'
264
+ ) do SET PS_PROJECT_IS_OPEN=%%~I
265
+ @ECHO.
266
+ @ECHO Running %PS_RUN% application...
267
+ @REM icacls below is just a hack for file-not-found error handling
268
+ IF "%PS_RUN%" EQU "console" (
269
+ icacls prusa-slicer-console.exe >nul || GOTO :END
270
+ start /wait /b prusa-slicer-console.exe
271
+ ) ELSE IF "%PS_RUN%" EQU "window" (
272
+ icacls prusa-slicer.exe >nul || GOTO :END
273
+ start prusa-slicer.exe
274
+ ) ELSE IF "%PS_RUN%" EQU "viewer" (
275
+ icacls prusa-gcodeviewer.exe >nul || GOTO :END
276
+ start prusa-gcodeviewer.exe
277
+ ) ELSE IF "%PS_RUN%" EQU "custom" (
278
+ icacls %PS_CUSTOM_BAT% >nul || GOTO :END
279
+ CALL %PS_CUSTOM_BAT%
280
+ ) ELSE IF "%PS_RUN%" EQU "ide" (
281
+ IF "%PS_PROJECT_IS_OPEN%" NEQ "" (
282
+ @ECHO WARNING: Solution is already open in Visual Studio. Skipping ide run step. 1>&2
283
+ ) ELSE (
284
+ @ECHO Preparing to run Visual Studio...
285
+ cd ..\.. || GOTO :END
286
+ REM This hack generates a single config for MSVS, guaranteeing it gets set as the active config.
287
+ cmake.exe .. -DCMAKE_PREFIX_PATH="%PS_DESTDIR%\usr\local" -DCMAKE_CONFIGURATION_TYPES=%PS_CONFIG% > nul 2> nul || GOTO :END
288
+ REM Now launch devenv with the single config (setting it active) and a /command switch to re-run cmake and generate the full config list
289
+ start devenv.exe %PS_SOLUTION_NAME%.sln /command ^"shell /o ^^^"%~f0^^^" -d ^^^"%PS_DESTDIR%^^^" -c %PS_CONFIG% -a %PS_ARCH% -r none -s app-cmake^"
290
+ REM If devenv fails to launch just directly regenerate the full config list.
291
+ IF %ERRORLEVEL% NEQ 0 (
292
+ cmake.exe .. -DCMAKE_PREFIX_PATH="%PS_DESTDIR%\usr\local" -DCMAKE_CONFIGURATION_TYPES=%PS_CONFIG_LIST% 2> nul 1> nul || GOTO :END
293
+ )
294
+ )
295
+ )
296
+
297
+ @REM ********** DON'T ADD ANY CODE BETWEEN THESE TWO SECTIONS **********
298
+ @REM RUN_APP may hand off control, so let exit codes fall through to PROLOGUE.
299
+
300
+ :PROLOGUE
301
+ SET EXIT_STATUS=%ERRORLEVEL%
302
+ :END
303
+ @IF "%PS_ECHO_ON%%PS_DRY_RUN_ONLY%" NEQ "" (
304
+ @ECHO **********************************************************************
305
+ @ECHO ** Script Parameters:
306
+ @ECHO **********************************************************************
307
+ @SET PS_
308
+ )
309
+ IF "%EXIT_STATUS%" NEQ "0" (
310
+ IF "%PS_CURRENT_STEP%" NEQ "arguments" (
311
+ @ECHO.
312
+ @ECHO ERROR: *** Build process failed at %PS_CURRENT_STEP% step. *** 1>&2
313
+ )
314
+ ) ELSE (
315
+ @ECHO All steps completed successfully.
316
+ )
317
+ popd
318
+ exit /B %EXIT_STATUS%
319
+
320
+ GOTO :EOF
321
+ REM Functions and stubs start here.
322
+
323
+ :RESOLVE_DESTDIR_CACHE
324
+ @REM Resolves all DESTDIR cache values and sets PS_STEPS_DEFAULT
325
+ @REM Note: This just sets global variables, so it doesn't use setlocal.
326
+ SET PS_DEPS_PATH_FILE_FOR_CONFIG=%~dp0build\.vs\%PS_ARCH%\%PS_CONFIG%\%PS_DEPS_PATH_FILE_NAME%
327
+ mkdir "%~dp0build\.vs\%PS_ARCH%\%PS_CONFIG%" > nul 2> nul
328
+ REM Copy a legacy file if we don't have one in the proper location.
329
+ echo f|xcopy /D "%~dp0build\%PS_ARCH%\%PS_CONFIG%\%PS_DEPS_PATH_FILE_NAME%" "%PS_DEPS_PATH_FILE_FOR_CONFIG%" > nul 2> nul
330
+ CALL :CANONICALIZE_PATH PS_DEPS_PATH_FILE_FOR_CONFIG
331
+ IF EXIST "%PS_DEPS_PATH_FILE_FOR_CONFIG%" (
332
+ FOR /F "tokens=* USEBACKQ" %%I IN ("%PS_DEPS_PATH_FILE_FOR_CONFIG%") DO (
333
+ SET PS_DESTDIR_CACHED=%%I
334
+ SET PS_DESTDIR_DEFAULT_MSG=%%I
335
+ )
336
+ SET PS_STEPS_DEFAULT=app-dirty
337
+ ) ELSE IF EXIST "%PS_DEPS_PATH_FILE%" (
338
+ FOR /F "tokens=* USEBACKQ" %%I IN ("%PS_DEPS_PATH_FILE%") DO (
339
+ SET PS_DESTDIR_CACHED=%%I
340
+ SET PS_DESTDIR_DEFAULT_MSG=%%I
341
+ )
342
+ SET PS_STEPS_DEFAULT=app
343
+ ) ELSE (
344
+ SET PS_DESTDIR_CACHED=
345
+ SET PS_DESTDIR_DEFAULT_MSG=Cache missing. Argument required.
346
+ SET PS_STEPS_DEFAULT=all
347
+ )
348
+ GOTO :EOF
349
+
350
+ :PARSE_OPTION
351
+ @REM Argument parser called for each argument
352
+ @REM %1 - Valid option list
353
+ @REM %2 - Variable name for parser state; must be unset when parsing finished
354
+ @REM %3 - Current argument value
355
+ @REM PARSER_FAIL will be set on an error
356
+ @REM Note: Must avoid delayed expansion since filenames may contain ! character
357
+ setlocal disableDelayedExpansion
358
+ IF "%PARSER_FAIL%" NEQ "" GOTO :EOF
359
+ CALL SET LAST_ARG=%%%2%%
360
+ IF "%LAST_ARG%" EQU "" (
361
+ CALL :PARSE_OPTION_NAME %1 %~2 %~3 1
362
+ SET ARG_TYPE=NAME
363
+ ) ELSE (
364
+ SET PS_SET_COMMAND=SET PS_%LAST_ARG%=%~3
365
+ SET ARG_TYPE=LAST_ARG
366
+ SET %~2=
367
+ )
368
+ CALL SET LAST_ARG=%%%2%%
369
+ IF "%LAST_ARG%%ARG_TYPE%" EQU "NAME" SET PARSER_FAIL=%~3
370
+ (
371
+ endlocal
372
+ SET PARSER_FAIL=%PARSER_FAIL%
373
+ SET %~2=%LAST_ARG%
374
+ %PS_SET_COMMAND%
375
+ )
376
+ GOTO :EOF
377
+
378
+ :PARSE_OPTION_VALUE
379
+ setlocal disableDelayedExpansion
380
+ @REM Parses value and verifies it is within the supplied list
381
+ @REM %1 - Valid option list
382
+ @REM %2 - In/out variable name; unset on error
383
+ CALL SET NAME=%~2
384
+ CALL SET SAVED_VALUE=%%%NAME%%%
385
+ CALL :PARSE_OPTION_NAME %1 %NAME% -%SAVED_VALUE%
386
+ CALL SET NEW_VALUE=%%%NAME%%%
387
+ IF "%NEW_VALUE%" EQU "" (
388
+ @ECHO ERROR: Invalid parameter: %NAME:~3%=%SAVED_VALUE% 1>&2
389
+ )
390
+ endlocal & SET %NAME%=%NEW_VALUE%
391
+ GOTO :EOF
392
+
393
+ :PARSE_OPTION_NAME
394
+ @REM Parses an option name
395
+ @REM %1 - Valid option list
396
+ @REM %2 - Out variable name; unset on error
397
+ @REM %3 - Current argument value
398
+ @REM %4 - Boolean indicating single character switches are valid
399
+ @REM Note: Delayed expansion safe because ! character is invalid in option name
400
+ setlocal enableDelayedExpansion
401
+ IF "%4" NEQ "" FOR %%I IN (%~1) DO @(
402
+ SET SHORT_NAME=%%~I
403
+ SET SHORT_ARG_!SHORT_NAME:~0,1!=%%~I
404
+ )
405
+ @SET OPTION_NAME=%~3
406
+ @(echo %OPTION_NAME%)| findstr /R /C:"[-/]..*">nul || GOTO :PARSE_OPTION_NAME_FAIL
407
+ @SET OPTION_NAME=%OPTION_NAME:~1%
408
+ IF "%4" NEQ "" (
409
+ IF "%OPTION_NAME%" EQU "%OPTION_NAME:~0,1%" (
410
+ IF "!SHORT_ARG_%OPTION_NAME:~0,1%!" NEQ "" SET OPTION_NAME=!SHORT_ARG_%OPTION_NAME:~0,1%!
411
+ )
412
+ )
413
+ @(echo %OPTION_NAME%)| findstr /R /C:".[ ][ ]*.">nul && GOTO :PARSE_OPTION_NAME_FAIL
414
+ @(echo %~1 )| findstr /I /C:" %OPTION_NAME% ">nul || GOTO :PARSE_OPTION_NAME_FAIL
415
+ FOR %%I IN (%~1) DO SET OPTION_NAME=!OPTION_NAME:%%~I=%%~I!
416
+ endlocal & SET %~2=%OPTION_NAME%
417
+ GOTO :EOF
418
+ :PARSE_OPTION_NAME_FAIL
419
+ endlocal & SET %~2=
420
+ GOTO :EOF
421
+
422
+ :MAKE_OR_CLEAN_DIRECTORY
423
+ @REM Create directory if it doesn't exist or clean it if it does
424
+ @REM %1 - Directory path to clean or create
425
+ @REM %* - Optional list of files/dirs to keep (in the base directory only)
426
+ setlocal disableDelayedExpansion
427
+ IF NOT EXIST "%~1" (
428
+ @ECHO Creating %~1
429
+ mkdir "%~1" && (
430
+ endlocal
431
+ GOTO :EOF
432
+ )
433
+ )
434
+ @ECHO Cleaning %~1 ...
435
+ SET KEEP_LIST=
436
+ :MAKE_OR_CLEAN_DIRECTORY_ARG_LOOP
437
+ IF "%~2" NEQ "" (
438
+ SET KEEP_LIST=%KEEP_LIST% "%~2"
439
+ SHIFT /2
440
+ GOTO :MAKE_OR_CLEAN_DIRECTORY_ARG_LOOP
441
+ )
442
+ for /F "usebackq delims=" %%I in (`dir /a /b "%~1"`) do (
443
+ (echo %KEEP_LIST%)| findstr /I /L /C:"\"%%I\"">nul || (
444
+ rmdir /s /q "%~1\%%I" 2>nul ) || del /q /f "%~1\%%I"
445
+ )
446
+ endlocal
447
+ GOTO :EOF
448
+
449
+ :TOLOWER
450
+ @REM Converts supplied environment variable to lowercase
451
+ @REM %1 - Input/output variable name
452
+ @REM Note: This is slow on very long strings, but is used only on very short ones
453
+ setlocal disableDelayedExpansion
454
+ @FOR %%b IN (a b c d e f g h i j k l m n o p q r s t u v w x y z) DO @CALL set %~1=%%%1:%%b=%%b%%
455
+ @CALL SET OUTPUT=%%%~1%%
456
+ endlocal & SET %~1=%OUTPUT%
457
+ GOTO :EOF
458
+
459
+ :CANONICALIZE_PATH
460
+ @REM Canonicalizes the path in the supplied variable
461
+ @REM %1 - Input/output variable containing path to canonicalize
462
+ @REM %2 - Optional base directory
463
+ setlocal
464
+ CALL :CANONICALIZE_PATH_INNER %1 %%%~1%% %2
465
+ endlocal & SET %~1=%OUTPUT%
466
+ GOTO :EOF
467
+ :CANONICALIZE_PATH_INNER
468
+ if "%~3" NEQ "" (pushd %3 || GOTO :EOF)
469
+ SET OUTPUT=%~f2
470
+ if "%~3" NEQ "" popd
471
+ GOTO :EOF
472
+
473
+ :DIFF_TIME
474
+ @REM Calculates elapsed time between two timestamps (TIME environment variable format)
475
+ @REM %1 - Output variable
476
+ @REM %2 - Start time
477
+ @REM %3 - End time
478
+ setlocal EnableDelayedExpansion
479
+ set START_ARG=%2
480
+ set END_ARG=%3
481
+ set END=!END_ARG:%TIME:~8,1%=%%100)*100+1!
482
+ set START=!START_ARG:%TIME:~8,1%=%%100)*100+1!
483
+ set /A DIFF=((((10!END:%TIME:~2,1%=%%100)*60+1!%%100)-((((10!START:%TIME:~2,1%=%%100)*60+1!%%100), DIFF-=(DIFF^>^>31)*24*60*60*100
484
+ set /A CC=DIFF%%100+100,DIFF/=100,SS=DIFF%%60+100,DIFF/=60,MM=DIFF%%60+100,HH=DIFF/60+100
485
+ @endlocal & set %1=%HH:~1%%TIME:~2,1%%MM:~1%%TIME:~2,1%%SS:~1%%TIME:~8,1%%CC:~1%
486
+ @GOTO :EOF
487
+
488
+ :WRITE_CUSTOM_SCRIPT_SKELETON
489
+ @REM Writes the following text to the supplied file
490
+ @REM %1 - Output filename
491
+ setlocal
492
+ @(
493
+ ECHO @ECHO.
494
+ ECHO @ECHO ********************************************************************************
495
+ ECHO @ECHO ** This is a custom run script skeleton.
496
+ ECHO @ECHO ********************************************************************************
497
+ ECHO @ECHO.
498
+ ECHO @ECHO ********************************************************************************
499
+ ECHO @ECHO ** The working directory is:
500
+ ECHO @ECHO ********************************************************************************
501
+ ECHO dir
502
+ ECHO @ECHO.
503
+ ECHO @ECHO ********************************************************************************
504
+ ECHO @ECHO ** The environment is:
505
+ ECHO @ECHO ********************************************************************************
506
+ ECHO set
507
+ ECHO @ECHO.
508
+ ECHO @ECHO ********************************************************************************
509
+ ECHO @ECHO ** Edit or replace this script to run custom steps after a successful build:
510
+ ECHO @ECHO ** %~1
511
+ ECHO @ECHO ********************************************************************************
512
+ ECHO @ECHO.
513
+ ) > "%~1"
514
+ endlocal
515
+ GOTO :EOF
data/bundled_deps/CMakeLists.txt ADDED
@@ -0,0 +1,52 @@
1
+ add_subdirectory(admesh)
2
+ add_subdirectory(avrdude)
3
+ add_subdirectory(miniz)
4
+ add_subdirectory(glu-libtess)
5
+ add_subdirectory(agg)
6
+ add_subdirectory(libigl)
7
+ add_subdirectory(hints)
8
+ add_subdirectory(libnest2d)
9
+
10
+ add_library(semver STATIC
11
+ semver/semver.c
12
+ semver/semver.h
13
+ )
14
+ target_include_directories(semver PUBLIC semver)
15
+ encoding_check(semver)
16
+
17
+ add_library(qoi STATIC
18
+ qoi/qoilib.c
19
+ qoi/qoi.h
20
+ )
21
+ target_include_directories(qoi PUBLIC qoi)
22
+ encoding_check(qoi)
23
+
24
+ add_library(fastfloat INTERFACE)
25
+ target_include_directories(fastfloat INTERFACE fast_float)
26
+
27
+ add_library(int128 INTERFACE)
28
+ target_include_directories(int128 INTERFACE int128)
29
+
30
+ add_library(localesutils STATIC
31
+ localesutils/LocalesUtils.cpp
32
+ localesutils/LocalesUtils.hpp
33
+ )
34
+ target_include_directories(localesutils PUBLIC localesutils)
35
+ target_link_libraries(localesutils PRIVATE fastfloat)
36
+
37
+ add_library(ankerl INTERFACE)
38
+ target_include_directories(ankerl INTERFACE ankerl)
39
+
40
+ add_library(stb_dxt INTERFACE)
41
+ target_include_directories(stb_dxt INTERFACE stb_dxt)
42
+
43
+ add_library(stb_image INTERFACE)
44
+ target_include_directories(stb_image INTERFACE stb_image)
45
+
46
+ add_library(tcbspan INTERFACE)
47
+ target_include_directories(tcbspan INTERFACE tcbspan)
48
+
49
+ if (SLIC3R_GUI)
50
+ add_subdirectory(imgui)
51
+ add_subdirectory(hidapi)
52
+ endif ()
data/bundled_deps/admesh/CMakeLists.txt ADDED
@@ -0,0 +1,16 @@
1
+ cmake_minimum_required(VERSION 2.8.12)
2
+ project(admesh)
3
+
4
+ add_library(admesh STATIC
5
+ admesh/connect.cpp
6
+ admesh/normals.cpp
7
+ admesh/shared.cpp
8
+ admesh/stl.h
9
+ admesh/stl_io.cpp
10
+ admesh/stlinit.cpp
11
+ admesh/util.cpp
12
+ )
13
+
14
+ target_include_directories(admesh PUBLIC .)
15
+ target_link_libraries(admesh PRIVATE boost_headeronly localesutils)
16
+ target_link_libraries(admesh PUBLIC Eigen3::Eigen)
data/bundled_deps/admesh/admesh/connect.cpp ADDED
@@ -0,0 +1,743 @@
1
+ /* ADMesh -- process triangulated solid meshes
2
+ * Copyright (C) 1995, 1996 Anthony D. Martin <[email protected]>
3
+ * Copyright (C) 2013, 2014 several contributors, see AUTHORS
4
+ *
5
+ * This program is free software; you can redistribute it and/or modify
6
+ * it under the terms of the GNU General Public License as published by
7
+ * the Free Software Foundation; either version 2 of the License, or
8
+ * (at your option) any later version.
9
+
10
+ * This program is distributed in the hope that it will be useful,
11
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ * GNU General Public License for more details.
14
+
15
+ * You should have received a copy of the GNU General Public License along
16
+ * with this program; if not, write to the Free Software Foundation, Inc.,
17
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
+ *
19
+ * Questions, comments, suggestions, etc to
20
+ * https://github.com/admesh/admesh/issues
21
+ */
22
+
23
+ #include <stdio.h>
24
+ #include <stdlib.h>
25
+ #include <string.h>
26
+ #include <math.h>
27
+
28
+ #include <algorithm>
29
+ #include <vector>
30
+
31
+ #include <boost/predef/other/endian.h>
32
+ #include <boost/log/trivial.hpp>
33
+ // Boost pool: Don't use mutexes to synchronize memory allocation.
34
+ #define BOOST_POOL_NO_MT
35
+ #include <boost/pool/object_pool.hpp>
36
+
37
+ #include "stl.h"
38
+
39
+ struct HashEdge {
40
+ // Key of a hash edge: sorted vertices of the edge.
41
+ uint32_t key[6];
42
+ // Compare two keys.
43
+ bool operator==(const HashEdge &rhs) const { return memcmp(key, rhs.key, sizeof(key)) == 0; }
44
+ bool operator!=(const HashEdge &rhs) const { return ! (*this == rhs); }
45
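+ // Cheap hash: mix the six packed coordinate words and reduce modulo the table size M.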
+ int hash(int M) const { return ((key[0] / 11 + key[1] / 7 + key[2] / 3) ^ (key[3] / 11 + key[4] / 7 + key[5] / 3)) % M; }
46
+
47
+ // Index of a facet owning this edge.
48
+ int facet_number;
49
+ // Index of this edge inside the facet with an index of facet_number.
50
+ // If this edge is stored backwards, which_edge is increased by 3.
51
+ int which_edge;
52
+ HashEdge *next;
53
+
54
+ void load_exact(stl_file *stl, const stl_vertex *a, const stl_vertex *b)
55
+ {
56
+ {
57
+ stl_vertex diff = (*a - *b).cwiseAbs();
58
+ float max_diff = std::max(diff(0), std::max(diff(1), diff(2)));
59
+ stl->stats.shortest_edge = std::min(max_diff, stl->stats.shortest_edge);
60
+ }
61
+
62
+ // Ensure identical vertex ordering of equal edges.
63
+ // This method is numerically robust.
64
+ if (vertex_lower(*a, *b)) {
65
+ } else {
66
+ // This edge is loaded backwards.
67
+ std::swap(a, b);
68
+ this->which_edge += 3;
69
+ }
70
+ memcpy(&this->key[0], a->data(), sizeof(stl_vertex));
71
+ memcpy(&this->key[3], b->data(), sizeof(stl_vertex));
72
+ // Switch negative zeros to positive zeros, so memcmp will consider them to be equal.
73
+ for (size_t i = 0; i < 6; ++ i) {
74
+ unsigned char *p = (unsigned char*)(this->key + i);
75
+ #if BOOST_ENDIAN_LITTLE_BYTE
76
+ if (p[0] == 0 && p[1] == 0 && p[2] == 0 && p[3] == 0x80)
77
+ // Negative zero, switch to positive zero.
78
+ p[3] = 0;
79
+ #else /* BOOST_ENDIAN_LITTLE_BYTE */
80
+ if (p[0] == 0x80 && p[1] == 0 && p[2] == 0 && p[3] == 0)
81
+ // Negative zero, switch to positive zero.
82
+ p[0] = 0;
83
+ #endif /* BOOST_ENDIAN_LITTLE_BYTE */
84
+ }
85
+ }
86
+
87
+ bool load_nearby(const stl_file *stl, const stl_vertex &a, const stl_vertex &b, float tolerance)
88
+ {
89
+ // Index of a grid cell spaced by tolerance.
90
+ typedef Eigen::Matrix<int32_t, 3, 1, Eigen::DontAlign> Vec3i;
91
+ Vec3i vertex1 = ((a - stl->stats.min) / tolerance).cast<int32_t>();
92
+ Vec3i vertex2 = ((b - stl->stats.min) / tolerance).cast<int32_t>();
93
+ static_assert(sizeof(Vec3i) == 12, "size of Vec3i incorrect");
94
+
95
+ if (vertex1 == vertex2)
96
+ // Both vertices hash to the same value
97
+ return false;
98
+
99
+ // Ensure identical vertex ordering of edges, which vertices land into equal grid cells.
100
+ // This method is numerically robust.
101
+ if ((vertex1[0] != vertex2[0]) ?
102
+ (vertex1[0] < vertex2[0]) :
103
+ ((vertex1[1] != vertex2[1]) ?
104
+ (vertex1[1] < vertex2[1]) :
105
+ (vertex1[2] < vertex2[2]))) {
106
+ memcpy(&this->key[0], vertex1.data(), sizeof(stl_vertex));
107
+ memcpy(&this->key[3], vertex2.data(), sizeof(stl_vertex));
108
+ } else {
109
+ memcpy(&this->key[0], vertex2.data(), sizeof(stl_vertex));
110
+ memcpy(&this->key[3], vertex1.data(), sizeof(stl_vertex));
111
+ this->which_edge += 3; /* this edge is loaded backwards */
112
+ }
113
+ return true;
114
+ }
115
+
116
+ private:
117
+ inline bool vertex_lower(const stl_vertex &a, const stl_vertex &b) {
118
+ return (a(0) != b(0)) ? (a(0) < b(0)) :
119
+ ((a(1) != b(1)) ? (a(1) < b(1)) : (a(2) < b(2)));
120
+ }
121
+ };
122
+
123
+ struct HashTableEdges {
124
+ HashTableEdges(size_t number_of_faces) {
125
+ this->M = (int)hash_size_from_nr_faces(number_of_faces);
126
+ this->heads.assign(this->M, nullptr);
127
+ this->tail = pool.construct();
128
+ this->tail->next = this->tail;
129
+ for (int i = 0; i < this->M; ++ i)
130
+ this->heads[i] = this->tail;
131
+ }
132
+ ~HashTableEdges() {
133
+ #ifndef NDEBUG
134
+ for (int i = 0; i < this->M; ++ i)
135
+ for (HashEdge *temp = this->heads[i]; temp != this->tail; temp = temp->next)
136
+ ++ this->freed;
137
+ this->tail = nullptr;
138
+ #endif /* NDEBUG */
139
+ }
140
+
141
+ void insert_edge_exact(stl_file *stl, const HashEdge &edge)
142
+ {
143
+ this->insert_edge(stl, edge, [stl](const HashEdge& edge1, const HashEdge& edge2) { record_neighbors(stl, edge1, edge2); });
144
+ }
145
+
146
+ void insert_edge_nearby(stl_file *stl, const HashEdge &edge)
147
+ {
148
+ this->insert_edge(stl, edge, [stl](const HashEdge& edge1, const HashEdge& edge2) { match_neighbors_nearby(stl, edge1, edge2); });
149
+ }
150
+
151
+ // Hash table on edges
152
+ std::vector<HashEdge*> heads;
153
+ HashEdge* tail;
154
+ int M;
155
+ boost::object_pool<HashEdge> pool;
156
+
157
+ #ifndef NDEBUG
158
+ size_t malloced = 0;
159
+ size_t freed = 0;
160
+ size_t collisions = 0;
161
+ #endif /* NDEBUG */
162
+
163
+ private:
164
+ static inline size_t hash_size_from_nr_faces(const size_t nr_faces)
165
+ {
166
+ // Good primes for addressing a roughly 30 bit space.
167
+ // https://planetmath.org/goodhashtableprimes
168
+ static std::vector<uint32_t> primes{ 98317, 196613, 393241, 786433, 1572869, 3145739, 6291469, 12582917, 25165843, 50331653, 100663319, 201326611, 402653189, 805306457, 1610612741 };
169
+ // Find a prime number for 50% filling of the shared triangle edges in the mesh.
170
+ auto it = std::upper_bound(primes.begin(), primes.end(), nr_faces * 3 * 2 - 1);
171
+ return (it == primes.end()) ? primes.back() : *it;
172
+ }
173
+
174
+
175
+ // MatchNeighbors(stl_file *stl, const HashEdge &edge_a, const HashEdge &edge_b)
176
+ template<typename MatchNeighbors>
177
+ void insert_edge(stl_file *stl, const HashEdge &edge, MatchNeighbors match_neighbors)
178
+ {
179
+ int chain_number = edge.hash(this->M);
180
+ HashEdge *link = this->heads[chain_number];
181
+ if (link == this->tail) {
182
+ // This list doesn't have any edges currently in it. Add this one.
183
+ HashEdge *new_edge = pool.construct(edge);
184
+ #ifndef NDEBUG
185
+ ++ this->malloced;
186
+ #endif /* NDEBUG */
187
+ new_edge->next = this->tail;
188
+ this->heads[chain_number] = new_edge;
189
+ } else if (edges_equal(edge, *link)) {
190
+ // This is a match. Record result in neighbors list.
191
+ match_neighbors(edge, *link);
192
+ // Delete the matched edge from the list.
193
+ this->heads[chain_number] = link->next;
194
+ // pool.destroy(link);
195
+ #ifndef NDEBUG
196
+ ++ this->freed;
197
+ #endif /* NDEBUG */
198
+ } else {
199
+ // Continue through the rest of the list.
200
+ for (;;) {
201
+ if (link->next == this->tail) {
202
+ // This is the last item in the list. Insert a new edge.
203
+ HashEdge *new_edge = pool.construct();
204
+ #ifndef NDEBUG
205
+ ++ this->malloced;
206
+ #endif /* NDEBUG */
207
+ *new_edge = edge;
208
+ new_edge->next = this->tail;
209
+ link->next = new_edge;
210
+ #ifndef NDEBUG
211
+ ++ this->collisions;
212
+ #endif /* NDEBUG */
213
+ break;
214
+ }
215
+ if (edges_equal(edge, *link->next)) {
216
+ // This is a match. Record result in neighbors list.
217
+ match_neighbors(edge, *link->next);
218
+ // Delete the matched edge from the list.
219
+ HashEdge *temp = link->next;
220
+ link->next = link->next->next;
221
+ // pool.destroy(temp);
222
+ #ifndef NDEBUG
223
+ ++ this->freed;
224
+ #endif /* NDEBUG */
225
+ break;
226
+ }
227
+ // This is not a match. Go to the next link.
228
+ link = link->next;
229
+ #ifndef NDEBUG
230
+ ++ this->collisions;
231
+ #endif /* NDEBUG */
232
+ }
233
+ }
234
+ }
235
+
236
+ // Edges equal for hashing. Only edges of different facets are allowed to be matched.
237
+ static inline bool edges_equal(const HashEdge &edge_a, const HashEdge &edge_b)
238
+ {
239
+ return edge_a.facet_number != edge_b.facet_number && edge_a == edge_b;
240
+ }
241
+
242
+ // Connect edge_a with edge_b, update edge connection statistics.
243
+ static void record_neighbors(stl_file *stl, const HashEdge &edge_a, const HashEdge &edge_b)
244
+ {
245
+ // Facet a's neighbor is facet b
246
+ stl->neighbors_start[edge_a.facet_number].neighbor[edge_a.which_edge % 3] = edge_b.facet_number; /* sets the .neighbor part */
247
+ stl->neighbors_start[edge_a.facet_number].which_vertex_not[edge_a.which_edge % 3] = (edge_b.which_edge + 2) % 3; /* sets the .which_vertex_not part */
248
+
249
+ // Facet b's neighbor is facet a
250
+ stl->neighbors_start[edge_b.facet_number].neighbor[edge_b.which_edge % 3] = edge_a.facet_number; /* sets the .neighbor part */
251
+ stl->neighbors_start[edge_b.facet_number].which_vertex_not[edge_b.which_edge % 3] = (edge_a.which_edge + 2) % 3; /* sets the .which_vertex_not part */
252
+
253
+ if ((edge_a.which_edge < 3 && edge_b.which_edge < 3) || (edge_a.which_edge > 2 && edge_b.which_edge > 2)) {
254
+ // These facets are oriented in opposite directions, their normals are probably messed up.
255
+ stl->neighbors_start[edge_a.facet_number].which_vertex_not[edge_a.which_edge % 3] += 3;
256
+ stl->neighbors_start[edge_b.facet_number].which_vertex_not[edge_b.which_edge % 3] += 3;
257
+ }
258
+
259
+ // Count successful connects:
260
+ // Total connects:
261
+ stl->stats.connected_edges += 2;
262
+ // Count individual connects:
263
+ switch (stl->neighbors_start[edge_a.facet_number].num_neighbors()) {
264
+ case 1: ++ stl->stats.connected_facets_1_edge; break;
265
+ case 2: ++ stl->stats.connected_facets_2_edge; break;
266
+ case 3: ++ stl->stats.connected_facets_3_edge; break;
267
+ default: assert(false);
268
+ }
269
+ switch (stl->neighbors_start[edge_b.facet_number].num_neighbors()) {
270
+ case 1: ++ stl->stats.connected_facets_1_edge; break;
271
+ case 2: ++ stl->stats.connected_facets_2_edge; break;
272
+ case 3: ++ stl->stats.connected_facets_3_edge; break;
273
+ default: assert(false);
274
+ }
275
+ }
276
+
277
+ static void match_neighbors_nearby(stl_file *stl, const HashEdge &edge_a, const HashEdge &edge_b)
278
+ {
279
+ record_neighbors(stl, edge_a, edge_b);
280
+
281
+ // Which vertices to change
282
+ int facet1 = -1;
283
+ int facet2 = -1;
284
+ int vertex1, vertex2;
285
+ stl_vertex new_vertex1, new_vertex2;
286
+ {
287
+ int v1a; // pair 1, facet a
288
+ int v1b; // pair 1, facet b
289
+ int v2a; // pair 2, facet a
290
+ int v2b; // pair 2, facet b
291
+ // Find first pair.
292
+ if (edge_a.which_edge < 3) {
293
+ v1a = edge_a.which_edge;
294
+ v2a = (edge_a.which_edge + 1) % 3;
295
+ } else {
296
+ v2a = edge_a.which_edge % 3;
297
+ v1a = (edge_a.which_edge + 1) % 3;
298
+ }
299
+ if (edge_b.which_edge < 3) {
300
+ v1b = edge_b.which_edge;
301
+ v2b = (edge_b.which_edge + 1) % 3;
302
+ } else {
303
+ v2b = edge_b.which_edge % 3;
304
+ v1b = (edge_b.which_edge + 1) % 3;
305
+ }
306
+
307
+ // Of the first pair, which vertex, if any, should be changed
308
+ if (stl->facet_start[edge_a.facet_number].vertex[v1a] != stl->facet_start[edge_b.facet_number].vertex[v1b]) {
309
+ // These facets are different.
310
+ if ( (stl->neighbors_start[edge_a.facet_number].neighbor[v1a] == -1)
311
+ && (stl->neighbors_start[edge_a.facet_number].neighbor[(v1a + 2) % 3] == -1)) {
312
+ // This vertex has no neighbors. This is a good one to change.
313
+ facet1 = edge_a.facet_number;
314
+ vertex1 = v1a;
315
+ new_vertex1 = stl->facet_start[edge_b.facet_number].vertex[v1b];
316
+ } else {
317
+ facet1 = edge_b.facet_number;
318
+ vertex1 = v1b;
319
+ new_vertex1 = stl->facet_start[edge_a.facet_number].vertex[v1a];
320
+ }
321
+ }
322
+
323
+ // Of the second pair, which vertex, if any, should be changed.
324
+ if (stl->facet_start[edge_a.facet_number].vertex[v2a] != stl->facet_start[edge_b.facet_number].vertex[v2b]) {
325
+ // These facets are different.
326
+ if ( (stl->neighbors_start[edge_a.facet_number].neighbor[v2a] == -1)
327
+ && (stl->neighbors_start[edge_a.facet_number].neighbor[(v2a + 2) % 3] == -1)) {
328
+ // This vertex has no neighbors. This is a good one to change.
329
+ facet2 = edge_a.facet_number;
330
+ vertex2 = v2a;
331
+ new_vertex2 = stl->facet_start[edge_b.facet_number].vertex[v2b];
332
+ } else {
333
+ facet2 = edge_b.facet_number;
334
+ vertex2 = v2b;
335
+ new_vertex2 = stl->facet_start[edge_a.facet_number].vertex[v2a];
336
+ }
337
+ }
338
+ }
339
+
340
+ auto change_vertices = [stl](int facet_num, int vnot, stl_vertex new_vertex)
341
+ {
342
+ int first_facet = facet_num;
343
+ bool direction = false;
344
+
345
+ for (;;) {
346
+ int pivot_vertex;
347
+ int next_edge;
348
+ if (vnot > 2) {
349
+ if (direction) {
350
+ pivot_vertex = (vnot + 1) % 3;
351
+ next_edge = vnot % 3;
352
+ }
353
+ else {
354
+ pivot_vertex = (vnot + 2) % 3;
355
+ next_edge = pivot_vertex;
356
+ }
357
+ direction = !direction;
358
+ }
359
+ else {
360
+ if (direction) {
361
+ pivot_vertex = (vnot + 2) % 3;
362
+ next_edge = pivot_vertex;
363
+ }
364
+ else {
365
+ pivot_vertex = (vnot + 1) % 3;
366
+ next_edge = vnot;
367
+ }
368
+ }
369
+ #if 0
370
+ if (stl->facet_start[facet_num].vertex[pivot_vertex](0) == new_vertex(0) &&
371
+ stl->facet_start[facet_num].vertex[pivot_vertex](1) == new_vertex(1) &&
372
+ stl->facet_start[facet_num].vertex[pivot_vertex](2) == new_vertex(2))
373
+ printf("Changing vertex %f,%f,%f: Same !!!\r\n", new_vertex(0), new_vertex(1), new_vertex(2));
374
+ else {
375
+ if (stl->facet_start[facet_num].vertex[pivot_vertex](0) != new_vertex(0))
376
+ printf("Changing coordinate x, vertex %e (0x%08x) to %e(0x%08x)\r\n",
377
+ stl->facet_start[facet_num].vertex[pivot_vertex](0),
378
+ *reinterpret_cast<const int*>(&stl->facet_start[facet_num].vertex[pivot_vertex](0)),
379
+ new_vertex(0),
380
+ *reinterpret_cast<const int*>(&new_vertex(0)));
381
+ if (stl->facet_start[facet_num].vertex[pivot_vertex](1) != new_vertex(1))
382
+ printf("Changing coordinate x, vertex %e (0x%08x) to %e(0x%08x)\r\n",
383
+ stl->facet_start[facet_num].vertex[pivot_vertex](1),
384
+ *reinterpret_cast<const int*>(&stl->facet_start[facet_num].vertex[pivot_vertex](1)),
385
+ new_vertex(1),
386
+ *reinterpret_cast<const int*>(&new_vertex(1)));
387
+ if (stl->facet_start[facet_num].vertex[pivot_vertex](2) != new_vertex(2))
388
+ printf("Changing coordinate x, vertex %e (0x%08x) to %e(0x%08x)\r\n",
389
+ stl->facet_start[facet_num].vertex[pivot_vertex](2),
390
+ *reinterpret_cast<const int*>(&stl->facet_start[facet_num].vertex[pivot_vertex](2)),
391
+ new_vertex(2),
392
+ *reinterpret_cast<const int*>(&new_vertex(2)));
393
+ }
394
+ #endif
395
+ stl->facet_start[facet_num].vertex[pivot_vertex] = new_vertex;
396
+ vnot = stl->neighbors_start[facet_num].which_vertex_not[next_edge];
397
+ facet_num = stl->neighbors_start[facet_num].neighbor[next_edge];
398
+ if (facet_num == -1)
399
+ break;
400
+
401
+ if (facet_num == first_facet) {
402
+ // back to the beginning
403
+ BOOST_LOG_TRIVIAL(info) << "Back to the first facet changing vertices: probably a mobius part. Try using a smaller tolerance or don't do a nearby check.";
404
+ return;
405
+ }
406
+ }
407
+ };
408
+
409
+ if (facet1 != -1) {
410
+ int vnot1 = (facet1 == edge_a.facet_number) ?
411
+ (edge_a.which_edge + 2) % 3 :
412
+ (edge_b.which_edge + 2) % 3;
413
+ if (((vnot1 + 2) % 3) == vertex1)
414
+ vnot1 += 3;
415
+ change_vertices(facet1, vnot1, new_vertex1);
416
+ }
417
+ if (facet2 != -1) {
418
+ int vnot2 = (facet2 == edge_a.facet_number) ?
419
+ (edge_a.which_edge + 2) % 3 :
420
+ (edge_b.which_edge + 2) % 3;
421
+ if (((vnot2 + 2) % 3) == vertex2)
422
+ vnot2 += 3;
423
+ change_vertices(facet2, vnot2, new_vertex2);
424
+ }
425
+ stl->stats.edges_fixed += 2;
426
+ }
427
+ };
428
+
429
+ // This function builds the neighbors list. No modifications are made
430
+ // to any of the facets. The edges are said to match only if all six
431
+ // floats of the first edge matches all six floats of the second edge.
432
+ void stl_check_facets_exact(stl_file *stl)
433
+ {
434
+ assert(stl->facet_start.size() == stl->neighbors_start.size());
435
+
436
+ stl->stats.connected_edges = 0;
437
+ stl->stats.connected_facets_1_edge = 0;
438
+ stl->stats.connected_facets_2_edge = 0;
439
+ stl->stats.connected_facets_3_edge = 0;
440
+
441
+ // If any two of the three vertices are found to be exactly the same, call them degenerate and remove the facet.
442
+ // Do it before the next step, as the next step stores references to the face indices in the hash tables and removing a facet
443
+ // will break the references.
444
+ for (uint32_t i = 0; i < stl->stats.number_of_facets;) {
445
+ stl_facet &facet = stl->facet_start[i];
446
+ if (facet.vertex[0] == facet.vertex[1] || facet.vertex[1] == facet.vertex[2] || facet.vertex[0] == facet.vertex[2]) {
447
+ // Remove the degenerate facet.
448
+ facet = stl->facet_start[-- stl->stats.number_of_facets];
449
+ stl->facet_start.pop_back();
450
+ stl->neighbors_start.pop_back();
451
+ stl->stats.facets_removed += 1;
452
+ stl->stats.degenerate_facets += 1;
453
+ } else
454
+ ++ i;
455
+ }
456
+
457
+ // Initialize hash table.
458
+ HashTableEdges hash_table(stl->stats.number_of_facets);
459
+ for (auto &neighbor : stl->neighbors_start)
460
+ neighbor.reset();
461
+
462
+ // Connect neighbor edges.
463
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
464
+ const stl_facet &facet = stl->facet_start[i];
465
+ for (int j = 0; j < 3; ++ j) {
466
+ HashEdge edge;
467
+ edge.facet_number = i;
468
+ edge.which_edge = j;
469
+ edge.load_exact(stl, &facet.vertex[j], &facet.vertex[(j + 1) % 3]);
470
+ hash_table.insert_edge_exact(stl, edge);
471
+ }
472
+ }
473
+
474
+ #if 0
475
+ printf("Number of faces: %d, number of manifold edges: %d, number of connected edges: %d, number of unconnected edges: %d\r\n",
476
+ stl->stats.number_of_facets, stl->stats.number_of_facets * 3,
477
+ stl->stats.connected_edges, stl->stats.number_of_facets * 3 - stl->stats.connected_edges);
478
+ #endif
479
+ }
480
+
481
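+ // Match edges that are still unconnected by quantizing their vertices to a grid with the given tolerance; edges falling into the same grid cells are paired and their vertices snapped together so they connect exactly.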
+ void stl_check_facets_nearby(stl_file *stl, float tolerance)
482
+ {
483
+ assert(stl->stats.connected_facets_3_edge <= stl->stats.connected_facets_2_edge);
484
+ assert(stl->stats.connected_facets_2_edge <= stl->stats.connected_facets_1_edge);
485
+ assert(stl->stats.connected_facets_1_edge <= stl->stats.number_of_facets);
486
+
487
+ if (stl->stats.connected_facets_3_edge == stl->stats.number_of_facets)
488
+ // No need to check any further. All facets are connected.
489
+ return;
490
+
491
+ HashTableEdges hash_table(stl->stats.number_of_facets);
492
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
493
+ //FIXME is the copy necessary?
494
+ stl_facet facet = stl->facet_start[i];
495
+ for (int j = 0; j < 3; j++) {
496
+ if (stl->neighbors_start[i].neighbor[j] == -1) {
497
+ HashEdge edge;
498
+ edge.facet_number = i;
499
+ edge.which_edge = j;
500
+ if (edge.load_nearby(stl, facet.vertex[j], facet.vertex[(j + 1) % 3], tolerance))
501
+ // Only insert edges that have different keys.
502
+ hash_table.insert_edge_nearby(stl, edge);
503
+ }
504
+ }
505
+ }
506
+ }
507
+
508
+ void stl_remove_unconnected_facets(stl_file *stl)
509
+ {
510
+ // A couple of things need to be done here. One is to remove any completely unconnected facets (0 edges connected) since these are
511
+ // useless and could be completely wrong. The second thing that needs to be done is to remove any degenerate facets that were created during
512
+ // stl_check_facets_nearby().
513
+ auto remove_facet = [stl](int facet_number)
514
+ {
515
+ ++ stl->stats.facets_removed;
516
+ /* Update list of connected edges */
517
+ stl_neighbors &neighbors = stl->neighbors_start[facet_number];
518
+ // Update statistics on unconnected triangle edges.
519
+ switch (neighbors.num_neighbors()) {
520
+ case 3: -- stl->stats.connected_facets_3_edge; // fall through
521
+ case 2: -- stl->stats.connected_facets_2_edge; // fall through
522
+ case 1: -- stl->stats.connected_facets_1_edge; // fall through
523
+ case 0: break;
524
+ default: assert(false);
525
+ }
526
+
527
+ if (facet_number < int(-- stl->stats.number_of_facets)) {
528
+ // Removing a face, which was not the last one.
529
+ // Copy the face and neighborship from the last face to facet_number.
530
+ stl->facet_start[facet_number] = stl->facet_start[stl->stats.number_of_facets];
531
+ neighbors = stl->neighbors_start[stl->stats.number_of_facets];
532
+ // Update neighborship of faces, which used to point to the last face, now moved to facet_number.
533
+ for (int i = 0; i < 3; ++ i)
534
+ if (neighbors.neighbor[i] != -1) {
535
+ int &other_face_idx = stl->neighbors_start[neighbors.neighbor[i]].neighbor[(neighbors.which_vertex_not[i] + 1) % 3];
536
+ if (other_face_idx != stl->stats.number_of_facets) {
537
+ BOOST_LOG_TRIVIAL(info) << "in remove_facet: neighbor = " << other_face_idx << " numfacets = " << stl->stats.number_of_facets << " this is wrong";
538
+ return;
539
+ }
540
+ other_face_idx = facet_number;
541
+ }
542
+ }
543
+
544
+ stl->facet_start.pop_back();
545
+ stl->neighbors_start.pop_back();
546
+ };
547
+
548
+ auto remove_degenerate = [stl, remove_facet](int facet)
549
+ {
550
+ // Update statistics on face connectivity after one edge was disconnected on the facet "facet_num".
551
+ auto update_connects_remove_1 = [stl](int facet_num) {
552
+ switch (stl->neighbors_start[facet_num].num_neighbors()) {
553
+ case 0: assert(false); break;
554
+ case 1: -- stl->stats.connected_facets_1_edge; break;
555
+ case 2: -- stl->stats.connected_facets_2_edge; break;
556
+ case 3: -- stl->stats.connected_facets_3_edge; break;
557
+ default: assert(false);
558
+ }
559
+ };
560
+
561
+ int edge_to_collapse = 0;
562
+ if (stl->facet_start[facet].vertex[0] == stl->facet_start[facet].vertex[1]) {
563
+ if (stl->facet_start[facet].vertex[1] == stl->facet_start[facet].vertex[2]) {
564
+ // All 3 vertices are equal. Collapse the edge with no neighbor if it exists.
565
+ const int *nbr = stl->neighbors_start[facet].neighbor;
566
+ edge_to_collapse = (nbr[0] == -1) ? 0 : (nbr[1] == -1) ? 1 : 2;
567
+ } else {
568
+ edge_to_collapse = 0;
569
+ }
570
+ } else if (stl->facet_start[facet].vertex[1] == stl->facet_start[facet].vertex[2]) {
571
+ edge_to_collapse = 1;
572
+ } else if (stl->facet_start[facet].vertex[2] == stl->facet_start[facet].vertex[0]) {
573
+ edge_to_collapse = 2;
574
+ } else {
575
+ // No degenerate. Function shouldn't have been called.
576
+ return;
577
+ }
578
+
579
+ int edge[3] = { (edge_to_collapse + 1) % 3, (edge_to_collapse + 2) % 3, edge_to_collapse };
580
+ int neighbor[] = {
581
+ stl->neighbors_start[facet].neighbor[edge[0]],
582
+ stl->neighbors_start[facet].neighbor[edge[1]],
583
+ stl->neighbors_start[facet].neighbor[edge[2]]
584
+ };
585
+ int vnot[] = {
586
+ stl->neighbors_start[facet].which_vertex_not[edge[0]],
587
+ stl->neighbors_start[facet].which_vertex_not[edge[1]],
588
+ stl->neighbors_start[facet].which_vertex_not[edge[2]]
589
+ };
590
+
591
+ // Update statistics on edge connectivity.
592
+ if ((neighbor[0] == -1) && (neighbor[1] != -1))
593
+ update_connects_remove_1(neighbor[1]);
594
+ if ((neighbor[1] == -1) && (neighbor[0] != -1))
595
+ update_connects_remove_1(neighbor[0]);
596
+
597
+ if (neighbor[0] >= 0) {
598
+ if (neighbor[1] >= 0) {
599
+ // Adjust the "flip" flag for the which_vertex_not values.
600
+ if (vnot[0] > 2) {
601
+ if (vnot[1] > 2) {
602
+ // The face to be removed has its normal flipped compared to the left & right neighbors, therefore after removing this face
603
+ // the two remaining neighbors will be oriented correctly.
604
+ vnot[0] -= 3;
605
+ vnot[1] -= 3;
606
+ } else
607
+ // One neighbor has its normal inverted compared to the face to be removed, the other is oriented equally.
608
+ // After removal, the two neighbors will have their normals flipped.
609
+ vnot[1] += 3;
610
+ } else if (vnot[1] > 2)
611
+ // One neighbor has its normal inverted compared to the face to be removed, the other is oriented equally.
612
+ // After removal, the two neighbors will have their normals flipped.
613
+ vnot[0] += 3;
614
+ }
615
+ stl->neighbors_start[neighbor[0]].neighbor[(vnot[0] + 1) % 3] = (neighbor[0] == neighbor[1]) ? -1 : neighbor[1];
616
+ stl->neighbors_start[neighbor[0]].which_vertex_not[(vnot[0] + 1) % 3] = vnot[1];
617
+ }
618
+ if (neighbor[1] >= 0) {
619
+ stl->neighbors_start[neighbor[1]].neighbor[(vnot[1] + 1) % 3] = (neighbor[0] == neighbor[1]) ? -1 : neighbor[0];
620
+ stl->neighbors_start[neighbor[1]].which_vertex_not[(vnot[1] + 1) % 3] = vnot[0];
621
+ }
622
+ if (neighbor[2] >= 0) {
623
+ update_connects_remove_1(neighbor[2]);
624
+ stl->neighbors_start[neighbor[2]].neighbor[(vnot[2] + 1) % 3] = -1;
625
+ }
626
+
627
+ remove_facet(facet);
628
+ };
629
+
630
+ // remove degenerate facets
631
+ for (uint32_t i = 0; i < stl->stats.number_of_facets;)
632
+ if (stl->facet_start[i].vertex[0] == stl->facet_start[i].vertex[1] ||
633
+ stl->facet_start[i].vertex[0] == stl->facet_start[i].vertex[2] ||
634
+ stl->facet_start[i].vertex[1] == stl->facet_start[i].vertex[2]) {
635
+ remove_degenerate(i);
636
+ // assert(stl_validate(stl));
637
+ } else
638
+ ++ i;
639
+
640
+ if (stl->stats.connected_facets_1_edge < (int)stl->stats.number_of_facets) {
641
+ // There are some faces with no connected edge at all. Remove completely unconnected facets.
642
+ for (uint32_t i = 0; i < stl->stats.number_of_facets;)
643
+ if (stl->neighbors_start[i].num_neighbors() == 0) {
644
+ // This facet is completely unconnected. Remove it.
645
+ remove_facet(i);
646
+ assert(stl_validate(stl));
647
+ } else
648
+ ++ i;
649
+ }
650
+ }
651
+
652
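+ // Close open boundaries: for each unconnected edge, pivot around the fan of faces to find the opposite boundary edge, add a new facet spanning the gap and hash its edges so it connects to the rest of the mesh.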
+ void stl_fill_holes(stl_file *stl)
653
+ {
654
+ // Insert all unconnected edges into hash list.
655
+ HashTableEdges hash_table(stl->stats.number_of_facets);
656
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
657
+ stl_facet facet = stl->facet_start[i];
658
+ for (int j = 0; j < 3; ++ j) {
659
+ if(stl->neighbors_start[i].neighbor[j] != -1)
660
+ continue;
661
+ HashEdge edge;
662
+ edge.facet_number = i;
663
+ edge.which_edge = j;
664
+ edge.load_exact(stl, &facet.vertex[j], &facet.vertex[(j + 1) % 3]);
665
+ hash_table.insert_edge_exact(stl, edge);
666
+ }
667
+ }
668
+
669
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
670
+ stl_facet facet = stl->facet_start[i];
671
+ int neighbors_initial[3] = { stl->neighbors_start[i].neighbor[0], stl->neighbors_start[i].neighbor[1], stl->neighbors_start[i].neighbor[2] };
672
+ int first_facet = i;
673
+ for (int j = 0; j < 3; ++ j) {
674
+ if (stl->neighbors_start[i].neighbor[j] != -1)
675
+ continue;
676
+
677
+ stl_facet new_facet;
678
+ new_facet.vertex[0] = facet.vertex[j];
679
+ new_facet.vertex[1] = facet.vertex[(j + 1) % 3];
680
+ bool direction = neighbors_initial[(j + 2) % 3] == -1;
681
+ int facet_num = i;
682
+ int vnot = (j + 2) % 3;
683
+
684
+ for (;;) {
685
+ int pivot_vertex = 0;
686
+ int next_edge = 0;
687
+ if (vnot > 2) {
688
+ if (direction) {
689
+ pivot_vertex = (vnot + 1) % 3;
690
+ next_edge = vnot % 3;
691
+ } else {
692
+ pivot_vertex = (vnot + 2) % 3;
693
+ next_edge = pivot_vertex;
694
+ }
695
+ direction = ! direction;
696
+ } else {
697
+ if(direction == 0) {
698
+ pivot_vertex = (vnot + 1) % 3;
699
+ next_edge = vnot;
700
+ } else {
701
+ pivot_vertex = (vnot + 2) % 3;
702
+ next_edge = pivot_vertex;
703
+ }
704
+ }
705
+
706
+ int next_facet = stl->neighbors_start[facet_num].neighbor[next_edge];
707
+ if (next_facet == -1) {
708
+ new_facet.vertex[2] = stl->facet_start[facet_num].vertex[vnot % 3];
709
+ stl_add_facet(stl, &new_facet);
710
+ for (int k = 0; k < 3; ++ k) {
711
+ HashEdge edge;
712
+ edge.facet_number = stl->stats.number_of_facets - 1;
713
+ edge.which_edge = k;
714
+ edge.load_exact(stl, &new_facet.vertex[k], &new_facet.vertex[(k + 1) % 3]);
715
+ hash_table.insert_edge_exact(stl, edge);
716
+ }
717
+ break;
718
+ }
719
+
720
+ vnot = stl->neighbors_start[facet_num].which_vertex_not[next_edge];
721
+ facet_num = next_facet;
722
+
723
+ if (facet_num == first_facet) {
724
+ // back to the beginning
725
+ BOOST_LOG_TRIVIAL(info) << "Back to the first facet filling holes: probably a mobius part. Try using a smaller tolerance or don't do a nearby check.";
726
+ return;
727
+ }
728
+ }
729
+ }
730
+ }
731
+ }
732
+
733
+ void stl_add_facet(stl_file *stl, const stl_facet *new_facet)
734
+ {
735
+ assert(stl->facet_start.size() == stl->stats.number_of_facets);
736
+ assert(stl->neighbors_start.size() == stl->stats.number_of_facets);
737
+ stl->facet_start.emplace_back(*new_facet);
738
+ // note that the normal vector is not set here, just initialized to 0.
739
+ stl->facet_start[stl->stats.number_of_facets].normal = stl_normal::Zero();
740
+ stl->neighbors_start.emplace_back();
741
+ ++ stl->stats.facets_added;
742
+ ++ stl->stats.number_of_facets;
743
+ }
data/bundled_deps/admesh/admesh/normals.cpp ADDED
@@ -0,0 +1,239 @@
1
+ /* ADMesh -- process triangulated solid meshes
2
+ * Copyright (C) 1995, 1996 Anthony D. Martin <[email protected]>
3
+ * Copyright (C) 2013, 2014 several contributors, see AUTHORS
4
+ *
5
+ * This program is free software; you can redistribute it and/or modify
6
+ * it under the terms of the GNU General Public License as published by
7
+ * the Free Software Foundation; either version 2 of the License, or
8
+ * (at your option) any later version.
9
+
10
+ * This program is distributed in the hope that it will be useful,
11
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ * GNU General Public License for more details.
14
+
15
+ * You should have received a copy of the GNU General Public License along
16
+ * with this program; if not, write to the Free Software Foundation, Inc.,
17
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
+ *
19
+ * Questions, comments, suggestions, etc to
20
+ * https://github.com/admesh/admesh/issues
21
+ */
22
+
23
+ #include <stdio.h>
24
+ #include <stdlib.h>
25
+ #include <string.h>
26
+ #include <math.h>
27
+
28
+ // Boost pool: Don't use mutexes to synchronize memory allocation.
29
+ #define BOOST_POOL_NO_MT
30
+ #include <boost/pool/object_pool.hpp>
31
+
32
+ #include "stl.h"
33
+
34
+ static void reverse_facet(stl_file *stl, int facet_num)
35
+ {
36
+ ++ stl->stats.facets_reversed;
37
+
38
+ int neighbor[3] = { stl->neighbors_start[facet_num].neighbor[0], stl->neighbors_start[facet_num].neighbor[1], stl->neighbors_start[facet_num].neighbor[2] };
39
+ int vnot[3] = { stl->neighbors_start[facet_num].which_vertex_not[0], stl->neighbors_start[facet_num].which_vertex_not[1], stl->neighbors_start[facet_num].which_vertex_not[2] };
40
+
41
+ // reverse the facet
42
+ stl_vertex tmp_vertex = stl->facet_start[facet_num].vertex[0];
43
+ stl->facet_start[facet_num].vertex[0] = stl->facet_start[facet_num].vertex[1];
44
+ stl->facet_start[facet_num].vertex[1] = tmp_vertex;
45
+
46
+ // fix the vnots of the neighboring facets
47
+ if (neighbor[0] != -1)
48
+ stl->neighbors_start[neighbor[0]].which_vertex_not[(vnot[0] + 1) % 3] = (stl->neighbors_start[neighbor[0]].which_vertex_not[(vnot[0] + 1) % 3] + 3) % 6;
49
+ if (neighbor[1] != -1)
50
+ stl->neighbors_start[neighbor[1]].which_vertex_not[(vnot[1] + 1) % 3] = (stl->neighbors_start[neighbor[1]].which_vertex_not[(vnot[1] + 1) % 3] + 4) % 6;
51
+ if (neighbor[2] != -1)
52
+ stl->neighbors_start[neighbor[2]].which_vertex_not[(vnot[2] + 1) % 3] = (stl->neighbors_start[neighbor[2]].which_vertex_not[(vnot[2] + 1) % 3] + 2) % 6;
53
+
54
+ // swap the neighbors of the facet that is being reversed
55
+ stl->neighbors_start[facet_num].neighbor[1] = neighbor[2];
56
+ stl->neighbors_start[facet_num].neighbor[2] = neighbor[1];
57
+
58
+ // swap the vnots of the facet that is being reversed
59
+ stl->neighbors_start[facet_num].which_vertex_not[1] = vnot[2];
60
+ stl->neighbors_start[facet_num].which_vertex_not[2] = vnot[1];
61
+
62
+ // reverse the values of the vnots of the facet that is being reversed
63
+ stl->neighbors_start[facet_num].which_vertex_not[0] = (stl->neighbors_start[facet_num].which_vertex_not[0] + 3) % 6;
64
+ stl->neighbors_start[facet_num].which_vertex_not[1] = (stl->neighbors_start[facet_num].which_vertex_not[1] + 3) % 6;
65
+ stl->neighbors_start[facet_num].which_vertex_not[2] = (stl->neighbors_start[facet_num].which_vertex_not[2] + 3) % 6;
66
+ }
67
+
68
+ // Returns true if the normal was flipped.
69
+ static bool check_normal_vector(stl_file *stl, int facet_num, int normal_fix_flag)
70
+ {
71
+ stl_facet *facet = &stl->facet_start[facet_num];
72
+
73
+ stl_normal normal;
74
+ stl_calculate_normal(normal, facet);
75
+ stl_normalize_vector(normal);
76
+ stl_normal normal_dif = (normal - facet->normal).cwiseAbs();
77
+
78
+ const float eps = 0.001f;
79
+ if (normal_dif(0) < eps && normal_dif(1) < eps && normal_dif(2) < eps) {
80
+ // Normal is within tolerance. It is not really necessary to change the values here, but just for consistency, I will.
81
+ facet->normal = normal;
82
+ return false;
83
+ }
84
+
85
+ stl_normal test_norm = facet->normal;
86
+ stl_normalize_vector(test_norm);
87
+ normal_dif = (normal - test_norm).cwiseAbs();
88
+ if (normal_dif(0) < eps && normal_dif(1) < eps && normal_dif(2) < eps) {
89
+ // The normal is not within tolerance, but direction is OK.
90
+ if (normal_fix_flag) {
91
+ facet->normal = normal;
92
+ ++ stl->stats.normals_fixed;
93
+ }
94
+ return false;
95
+ }
96
+
97
+ test_norm *= -1.f;
98
+ normal_dif = (normal - test_norm).cwiseAbs();
99
+ if (normal_dif(0) < eps && normal_dif(1) < eps && normal_dif(2) < eps) {
100
+ // The normal is not within tolerance and backwards.
101
+ if (normal_fix_flag) {
102
+ facet->normal = normal;
103
+ ++ stl->stats.normals_fixed;
104
+ }
105
+ return true;
106
+ }
107
+ if (normal_fix_flag) {
108
+ facet->normal = normal;
109
+ ++ stl->stats.normals_fixed;
110
+ }
111
+ // Status is unknown.
112
+ return false;
113
+ }
114
+
115
+ void stl_fix_normal_directions(stl_file *stl)
116
+ {
117
+ // This may happen for malformed models, see: https://github.com/prusa3d/PrusaSlicer/issues/2209
118
+ if (stl->stats.number_of_facets == 0)
119
+ return;
120
+
121
+ struct stl_normal {
122
+ int facet_num;
123
+ stl_normal *next;
124
+ };
125
+
126
+ // Initialize linked list.
127
+ boost::object_pool<stl_normal> pool;
128
+ stl_normal *head = pool.construct();
129
+ stl_normal *tail = pool.construct();
130
+ head->next = tail;
131
+ tail->next = tail;
132
+
133
+ // Initialize list that keeps track of already fixed facets.
134
+ std::vector<char> norm_sw(stl->stats.number_of_facets, 0);
135
+ // Initialize list that keeps track of reversed facets.
136
+ std::vector<int> reversed_ids;
137
+ reversed_ids.reserve(stl->stats.number_of_facets);
138
+
139
+ int facet_num = 0;
140
+ // If normal vector is not within tolerance and backwards:
141
+ // Arbitrarily starts at face 0. If this one is wrong, we're screwed. Thankfully, the chances
142
+ // of it being wrong randomly are low if most of the triangles are right:
143
+ if (check_normal_vector(stl, 0, 0)) {
144
+ reverse_facet(stl, 0);
145
+ reversed_ids.emplace_back(0);
146
+ }
147
+
148
+ // Say that we've fixed this facet:
149
+ norm_sw[facet_num] = 1;
150
+ int checked = 1;
151
+
152
+ for (;;) {
153
+ // Add neighbors_to_list. Add unconnected neighbors to the list.
154
+ bool force_exit = false;
155
+ for (int j = 0; j < 3; ++ j) {
156
+ // Reverse the neighboring facets if necessary.
157
+ if (stl->neighbors_start[facet_num].which_vertex_not[j] > 2) {
158
+ // If the facet has a neighbor that is -1, it means that edge isn't shared by another facet
159
+ if (stl->neighbors_start[facet_num].neighbor[j] != -1) {
160
+ if (norm_sw[stl->neighbors_start[facet_num].neighbor[j]] == 1) {
161
+ // trying to modify a facet already marked as fixed, revert all changes made until now and exit (fixes: #716, #574, #413, #269, #262, #259, #230, #228, #206)
162
+ for (int id = int(reversed_ids.size()) - 1; id >= 0; -- id)
163
+ reverse_facet(stl, reversed_ids[id]);
164
+ force_exit = true;
165
+ break;
166
+ }
167
+ reverse_facet(stl, stl->neighbors_start[facet_num].neighbor[j]);
168
+ reversed_ids.emplace_back(stl->neighbors_start[facet_num].neighbor[j]);
169
+ }
170
+ }
171
+ // If this edge of the facet is connected:
172
+ if (stl->neighbors_start[facet_num].neighbor[j] != -1) {
173
+ // If we haven't fixed this facet yet, add it to the list:
174
+ if (norm_sw[stl->neighbors_start[facet_num].neighbor[j]] != 1) {
175
+ // Add node to beginning of list.
176
+ stl_normal *newn = pool.construct();
177
+ newn->facet_num = stl->neighbors_start[facet_num].neighbor[j];
178
+ newn->next = head->next;
179
+ head->next = newn;
180
+ }
181
+ }
182
+ }
183
+
184
+ // an error occurred, quit the for loop and exit
185
+ if (force_exit)
186
+ break;
187
+
188
+ // Get next facet to fix from top of list.
189
+ if (head->next != tail) {
190
+ facet_num = head->next->facet_num;
191
+ assert(facet_num < stl->stats.number_of_facets);
192
+ if (norm_sw[facet_num] != 1) { // If facet is in list multiple times
193
+ norm_sw[facet_num] = 1; // Record this one as being fixed.
194
+ ++ checked;
195
+ }
196
+ stl_normal *temp = head->next; // Delete this facet from the list.
197
+ head->next = head->next->next;
198
+ // pool.destroy(temp);
199
+ } else { // If we ran out of facets to fix: All of the facets in this part have been fixed.
200
+ ++ stl->stats.number_of_parts;
201
+ if (checked >= int(stl->stats.number_of_facets))
202
+ // All of the facets have been checked. Bail out.
203
+ break;
204
+ // There is another part here. Find it and continue.
205
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
206
+ if (norm_sw[i] == 0) {
207
+ // This is the first facet of the next part.
208
+ facet_num = i;
209
+ if (check_normal_vector(stl, i, 0)) {
210
+ reverse_facet(stl, i);
211
+ reversed_ids.emplace_back(i);
212
+ }
213
+ norm_sw[facet_num] = 1;
214
+ ++ checked;
215
+ break;
216
+ }
217
+ }
218
+ }
219
+
220
+ // pool.destroy(head);
221
+ // pool.destroy(tail);
222
+ }
223
+
224
+ void stl_fix_normal_values(stl_file *stl)
225
+ {
226
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
227
+ check_normal_vector(stl, i, 1);
228
+ }
229
+
230
+ void stl_reverse_all_facets(stl_file *stl)
231
+ {
232
+ stl_normal normal;
233
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
234
+ reverse_facet(stl, i);
235
+ stl_calculate_normal(normal, &stl->facet_start[i]);
236
+ stl_normalize_vector(normal);
237
+ stl->facet_start[i].normal = normal;
238
+ }
239
+ }
data/bundled_deps/admesh/admesh/shared.cpp ADDED
@@ -0,0 +1,288 @@
1
+ /* ADMesh -- process triangulated solid meshes
2
+ * Copyright (C) 1995, 1996 Anthony D. Martin <[email protected]>
3
+ * Copyright (C) 2013, 2014 several contributors, see AUTHORS
4
+ *
5
+ * This program is free software; you can redistribute it and/or modify
6
+ * it under the terms of the GNU General Public License as published by
7
+ * the Free Software Foundation; either version 2 of the License, or
8
+ * (at your option) any later version.
9
+
10
+ * This program is distributed in the hope that it will be useful,
11
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ * GNU General Public License for more details.
14
+
15
+ * You should have received a copy of the GNU General Public License along
16
+ * with this program; if not, write to the Free Software Foundation, Inc.,
17
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
+ *
19
+ * Questions, comments, suggestions, etc to
20
+ * https://github.com/admesh/admesh/issues
21
+ */
22
+
23
+ #include <stdlib.h>
24
+ #include <string.h>
25
+
26
+ #include <vector>
27
+
28
+ #include <boost/log/trivial.hpp>
29
+ #include <boost/nowide/cstdio.hpp>
30
+
31
+ #include "stl.h"
32
+
33
+ #include <LocalesUtils.hpp>
34
+
35
+ void stl_generate_shared_vertices(stl_file *stl, indexed_triangle_set &its)
36
+ {
37
+ // 3 indices to vertex per face
38
+ its.indices.assign(stl->stats.number_of_facets, stl_triangle_vertex_indices(-1, -1, -1));
39
+ // Shared vertices (3D coordinates)
40
+ its.vertices.clear();
41
+ its.vertices.reserve(stl->stats.number_of_facets / 2);
42
+
43
+ // A degenerate mesh may contain loops: Traversing a fan will end up in an endless loop
44
+ // while never reaching the starting face. To avoid these endless loops, traversed faces at each fan traversal
45
+ // are marked with a unique fan_traversal_stamp.
46
+ unsigned int fan_traversal_stamp = 0;
47
+ std::vector<unsigned int> fan_traversal_facet_visited(stl->stats.number_of_facets, 0);
48
+
49
+ for (uint32_t facet_idx = 0; facet_idx < stl->stats.number_of_facets; ++ facet_idx) {
50
+ for (int j = 0; j < 3; ++ j) {
51
+ if (its.indices[facet_idx][j] != -1)
52
+ // Shared vertex was already assigned.
53
+ continue;
54
+ // Create a new shared vertex.
55
+ its.vertices.emplace_back(stl->facet_start[facet_idx].vertex[j]);
56
+ // Traverse the fan around the j-th vertex of the i-th face, assign the newly created shared vertex index to all the neighboring triangles in the triangle fan.
57
+ int facet_in_fan_idx = facet_idx;
58
+ bool edge_direction = false;
59
+ bool traversal_reversed = false;
60
+ int vnot = (j + 2) % 3;
61
+ // Increase the fan traversal stamp so that visited marks from previous fan traversals are invalidated.
62
+ ++ fan_traversal_stamp;
63
+ for (;;) {
64
+ // Next edge on facet_in_fan_idx to be traversed. The edge is indexed by its starting vertex index.
65
+ int next_edge = 0;
66
+ // Vertex index in facet_in_fan_idx, which is being pivoted around, and which is being assigned a new shared vertex.
67
+ int pivot_vertex = 0;
68
+ if (vnot > 2) {
69
+ // The edge of facet_in_fan_idx opposite to vnot is equally oriented, therefore
70
+ // the neighboring facet is flipped.
71
+ if (! edge_direction) {
72
+ pivot_vertex = (vnot + 2) % 3;
73
+ next_edge = pivot_vertex;
74
+ } else {
75
+ pivot_vertex = (vnot + 1) % 3;
76
+ next_edge = vnot % 3;
77
+ }
78
+ edge_direction = ! edge_direction;
79
+ } else {
80
+ // The neighboring facet is correctly oriented.
81
+ if (! edge_direction) {
82
+ pivot_vertex = (vnot + 1) % 3;
83
+ next_edge = vnot;
84
+ } else {
85
+ pivot_vertex = (vnot + 2) % 3;
86
+ next_edge = pivot_vertex;
87
+ }
88
+ }
89
+ its.indices[facet_in_fan_idx][pivot_vertex] = its.vertices.size() - 1;
90
+ fan_traversal_facet_visited[facet_in_fan_idx] = fan_traversal_stamp;
91
+
92
+ // next_edge is an index of the starting vertex of the edge, not an index of the opposite vertex to the edge!
93
+ int next_facet = stl->neighbors_start[facet_in_fan_idx].neighbor[next_edge];
94
+ if (next_facet == -1) {
95
+ // No neighbor going in the current direction.
96
+ if (traversal_reversed) {
97
+ // Went to one limit, then turned back and reached the other limit. Quit the fan traversal.
98
+ break;
99
+ } else {
100
+ // Reached the first limit. Now try to reverse and traverse up to the other limit.
101
+ edge_direction = true;
102
+ vnot = (j + 1) % 3;
103
+ traversal_reversed = true;
104
+ facet_in_fan_idx = facet_idx;
105
+ }
106
+ } else if (next_facet == facet_idx) {
107
+ // Traversed a closed fan all around.
108
+ // assert(! traversal_reversed);
109
+ break;
110
+ } else if (next_facet >= (int)stl->stats.number_of_facets) {
111
+ // The mesh is not valid!
112
+ // assert(false);
113
+ break;
114
+ } else if (fan_traversal_facet_visited[next_facet] == fan_traversal_stamp) {
115
+ // Traversed a closed fan all around, but did not reach the starting face.
116
+ // This indicates an invalid geometry (non-manifold).
117
+ //assert(false);
118
+ break;
119
+ } else {
120
+ // Continue traversal.
121
+ // next_edge is an index of the starting vertex of the edge, not an index of the opposite vertex to the edge!
122
+ vnot = stl->neighbors_start[facet_in_fan_idx].which_vertex_not[next_edge];
123
+ facet_in_fan_idx = next_facet;
124
+ }
125
+ }
126
+ }
127
+ }
128
+ }
129
+
130
+ bool its_write_off(const indexed_triangle_set &its, const char *file)
131
+ {
132
+ Slic3r::CNumericLocalesSetter locales_setter;
133
+ /* Open the file */
134
+ FILE *fp = boost::nowide::fopen(file, "w");
135
+ if (fp == nullptr) {
136
+ BOOST_LOG_TRIVIAL(error) << "stl_write_ascii: Couldn't open " << file << " for writing";
137
+ return false;
138
+ }
139
+
140
+ fprintf(fp, "OFF\n");
141
+ fprintf(fp, "%d %d 0\n", (int)its.vertices.size(), (int)its.indices.size());
142
+ for (int i = 0; i < its.vertices.size(); ++ i)
143
+ fprintf(fp, "\t%f %f %f\n", its.vertices[i](0), its.vertices[i](1), its.vertices[i](2));
144
+ for (uint32_t i = 0; i < its.indices.size(); ++ i)
145
+ fprintf(fp, "\t3 %d %d %d\n", its.indices[i][0], its.indices[i][1], its.indices[i][2]);
146
+ fclose(fp);
147
+ return true;
148
+ }
149
+
150
+ bool its_write_vrml(const indexed_triangle_set &its, const char *file)
151
+ {
152
+ Slic3r::CNumericLocalesSetter locales_setter;
153
+ /* Open the file */
154
+ FILE *fp = boost::nowide::fopen(file, "w");
155
+ if (fp == nullptr) {
156
+ BOOST_LOG_TRIVIAL(error) << "stl_write_vrml: Couldn't open " << file << " for writing";
157
+ return false;
158
+ }
159
+
160
+ fprintf(fp, "#VRML V1.0 ascii\n\n");
161
+ fprintf(fp, "Separator {\n");
162
+ fprintf(fp, "\tDEF STLShape ShapeHints {\n");
163
+ fprintf(fp, "\t\tvertexOrdering COUNTERCLOCKWISE\n");
164
+ fprintf(fp, "\t\tfaceType CONVEX\n");
165
+ fprintf(fp, "\t\tshapeType SOLID\n");
166
+ fprintf(fp, "\t\tcreaseAngle 0.0\n");
167
+ fprintf(fp, "\t}\n");
168
+ fprintf(fp, "\tDEF STLModel Separator {\n");
169
+ fprintf(fp, "\t\tDEF STLColor Material {\n");
170
+ fprintf(fp, "\t\t\temissiveColor 0.700000 0.700000 0.000000\n");
171
+ fprintf(fp, "\t\t}\n");
172
+ fprintf(fp, "\t\tDEF STLVertices Coordinate3 {\n");
173
+ fprintf(fp, "\t\t\tpoint [\n");
174
+
175
+ int i = 0;
176
+ for (; i + 1 < its.vertices.size(); ++ i)
177
+ fprintf(fp, "\t\t\t\t%f %f %f,\n", its.vertices[i](0), its.vertices[i](1), its.vertices[i](2));
178
+ fprintf(fp, "\t\t\t\t%f %f %f]\n", its.vertices[i](0), its.vertices[i](1), its.vertices[i](2));
179
+ fprintf(fp, "\t\t}\n");
180
+ fprintf(fp, "\t\tDEF STLTriangles IndexedFaceSet {\n");
181
+ fprintf(fp, "\t\t\tcoordIndex [\n");
182
+
183
+ for (size_t i = 0; i + 1 < its.indices.size(); ++ i)
184
+ fprintf(fp, "\t\t\t\t%d, %d, %d, -1,\n", its.indices[i][0], its.indices[i][1], its.indices[i][2]);
185
+ fprintf(fp, "\t\t\t\t%d, %d, %d, -1]\n", its.indices[i][0], its.indices[i][1], its.indices[i][2]);
186
+ fprintf(fp, "\t\t}\n");
187
+ fprintf(fp, "\t}\n");
188
+ fprintf(fp, "}\n");
189
+ fclose(fp);
190
+ return true;
191
+ }
192
+
193
+ bool its_write_obj(const indexed_triangle_set &its, const char *file)
194
+ {
195
+ Slic3r::CNumericLocalesSetter locales_setter;
196
+ FILE *fp = boost::nowide::fopen(file, "w");
197
+ if (fp == nullptr) {
198
+ BOOST_LOG_TRIVIAL(error) << "stl_write_obj: Couldn't open " << file << " for writing";
199
+ return false;
200
+ }
201
+
202
+ for (size_t i = 0; i < its.vertices.size(); ++ i)
203
+ fprintf(fp, "v %f %f %f\n", its.vertices[i](0), its.vertices[i](1), its.vertices[i](2));
204
+ for (size_t i = 0; i < its.indices.size(); ++ i)
205
+ fprintf(fp, "f %d %d %d\n", its.indices[i][0]+1, its.indices[i][1]+1, its.indices[i][2]+1);
206
+ fclose(fp);
207
+ return true;
208
+ }
209
+
210
+ bool its_write_obj(const indexed_triangle_set& its, const std::vector<obj_color> &color, const char* file)
211
+ {
212
+ Slic3r::CNumericLocalesSetter locales_setter;
213
+ FILE* fp = boost::nowide::fopen(file, "w");
214
+ if (fp == nullptr) {
215
+ BOOST_LOG_TRIVIAL(error) << "stl_write_obj: Couldn't open " << file << " for writing";
216
+ return false;
217
+ }
218
+
219
+ for (size_t i = 0; i < its.vertices.size(); ++i)
220
+ fprintf(fp, "v %f %f %f %f %f %f\n",
221
+ its.vertices[i](0),
222
+ its.vertices[i](1),
223
+ its.vertices[i](2),
224
+ color[i](0),
225
+ color[i](1),
226
+ color[i](2));
227
+ for (size_t i = 0; i < its.indices.size(); ++i)
228
+ fprintf(fp, "f %d %d %d\n",
229
+ its.indices[i][0] + 1,
230
+ its.indices[i][1] + 1,
231
+ its.indices[i][2] + 1);
232
+ fclose(fp);
233
+ return true;
234
+ }
235
+
236
+ // Check validity of the mesh, assert on error.
237
+ bool stl_validate(const stl_file *stl, const indexed_triangle_set &its)
238
+ {
239
+ assert(! stl->facet_start.empty());
240
+ assert(stl->facet_start.size() == stl->stats.number_of_facets);
241
+ assert(stl->neighbors_start.size() == stl->stats.number_of_facets);
242
+ assert(stl->facet_start.size() == stl->neighbors_start.size());
243
+ assert(! stl->neighbors_start.empty());
244
+ assert((its.indices.empty()) == (its.vertices.empty()));
245
+ assert(stl->stats.number_of_facets > 0);
246
+ assert(its.vertices.empty() || its.indices.size() == stl->stats.number_of_facets);
247
+
248
+ #ifdef _DEBUG
249
+ // Verify validity of neighborship data.
250
+ for (int facet_idx = 0; facet_idx < (int)stl->stats.number_of_facets; ++ facet_idx) {
251
+ const stl_neighbors &nbr = stl->neighbors_start[facet_idx];
252
+ const int *vertices = its.indices.empty() ? nullptr : its.indices[facet_idx].data();
253
+ for (int nbr_idx = 0; nbr_idx < 3; ++ nbr_idx) {
254
+ int nbr_face = stl->neighbors_start[facet_idx].neighbor[nbr_idx];
255
+ assert(nbr_face < (int)stl->stats.number_of_facets);
256
+ if (nbr_face != -1) {
257
+ int nbr_vnot = nbr.which_vertex_not[nbr_idx];
258
+ assert(nbr_vnot >= 0 && nbr_vnot < 6);
259
+ // Neighbor of the neighbor is the original face.
260
+ assert(stl->neighbors_start[nbr_face].neighbor[(nbr_vnot + 1) % 3] == facet_idx);
261
+ int vnot_back = stl->neighbors_start[nbr_face].which_vertex_not[(nbr_vnot + 1) % 3];
262
+ assert(vnot_back >= 0 && vnot_back < 6);
263
+ assert((nbr_vnot < 3) == (vnot_back < 3));
264
+ assert(vnot_back % 3 == (nbr_idx + 2) % 3);
265
+ if (vertices != nullptr) {
266
+ // Has shared vertices.
267
+ if (nbr_vnot < 3) {
268
+ // Faces facet_idx and nbr_face share two vertices across the common edge. Faces are correctly oriented.
269
+ assert((its.indices[nbr_face][(nbr_vnot + 1) % 3] == vertices[(nbr_idx + 1) % 3] && its.indices[nbr_face][(nbr_vnot + 2) % 3] == vertices[nbr_idx]));
270
+ } else {
271
+ // Faces facet_idx and nbr_face share two vertices accross the common edge. Faces are incorrectly oriented, one of them is flipped.
272
+ assert((its.indices[nbr_face][(nbr_vnot + 2) % 3] == vertices[(nbr_idx + 1) % 3] && its.indices[nbr_face][(nbr_vnot + 1) % 3] == vertices[nbr_idx]));
273
+ }
274
+ }
275
+ }
276
+ }
277
+ }
278
+ #endif /* _DEBUG */
279
+
280
+ return true;
281
+ }
282
+
283
+ // Check validity of the mesh, assert on error.
284
+ bool stl_validate(const stl_file *stl)
285
+ {
286
+ indexed_triangle_set its;
287
+ return stl_validate(stl, its);
288
+ }
data/bundled_deps/admesh/admesh/stl.h ADDED
@@ -0,0 +1,343 @@
1
+ /* ADMesh -- process triangulated solid meshes
2
+ * Copyright (C) 1995, 1996 Anthony D. Martin <[email protected]>
3
+ * Copyright (C) 2013, 2014 several contributors, see AUTHORS
4
+ *
5
+ * This program is free software; you can redistribute it and/or modify
6
+ * it under the terms of the GNU General Public License as published by
7
+ * the Free Software Foundation; either version 2 of the License, or
8
+ * (at your option) any later version.
9
+
10
+ * This program is distributed in the hope that it will be useful,
11
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ * GNU General Public License for more details.
14
+
15
+ * You should have received a copy of the GNU General Public License along
16
+ * with this program; if not, write to the Free Software Foundation, Inc.,
17
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
+ *
19
+ * Questions, comments, suggestions, etc to
20
+ * https://github.com/admesh/admesh/issues
21
+ */
22
+
23
+ #ifndef __admesh_stl__
24
+ #define __admesh_stl__
25
+
26
+ #include <stdio.h>
27
+ #include <stdint.h>
28
+ #include <stddef.h>
+ #include <string.h> // memset() used by stl_stats below
29
+
30
+ #include <vector>
31
+ #include <Eigen/Geometry>
32
+
33
+ // Size of the binary STL header, free form.
34
+ #define LABEL_SIZE 80
35
+ // Binary STL, length of the "number of faces" counter.
36
+ #define NUM_FACET_SIZE 4
37
+ // Binary STL, sizeof header + number of faces.
38
+ #define HEADER_SIZE 84
39
+ #define STL_MIN_FILE_SIZE 284
40
+ #define ASCII_LINES_PER_FACET 7
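+ // For reference: HEADER_SIZE (84) is LABEL_SIZE (80) plus NUM_FACET_SIZE (4), and
+ // STL_MIN_FILE_SIZE (284) corresponds to that header plus four 50-byte binary facets,
+ // four triangles (a tetrahedron) being the smallest closed solid. The seven ASCII
+ // lines per facet are "facet normal", "outer loop", three "vertex" lines, "endloop"
+ // and "endfacet".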
41
+
42
+ typedef Eigen::Matrix<float, 3, 1, Eigen::DontAlign> stl_vertex;
43
+ typedef Eigen::Matrix<float, 3, 1, Eigen::DontAlign> stl_normal;
44
+ typedef Eigen::Matrix<int, 3, 1, Eigen::DontAlign> stl_triangle_vertex_indices;
45
+ static_assert(sizeof(stl_vertex) == 12, "size of stl_vertex incorrect");
46
+ static_assert(sizeof(stl_normal) == 12, "size of stl_normal incorrect");
47
+
48
+ struct stl_facet {
49
+ stl_normal normal;
50
+ stl_vertex vertex[3];
51
+ char extra[2];
52
+
53
+ stl_facet rotated(const Eigen::Quaternion<float, Eigen::DontAlign> &rot) const {
54
+ stl_facet out;
55
+ out.normal = rot * this->normal;
56
+ out.vertex[0] = rot * this->vertex[0];
57
+ out.vertex[1] = rot * this->vertex[1];
58
+ out.vertex[2] = rot * this->vertex[2];
59
+ return out;
60
+ }
61
+ };
62
+
63
+ #define SIZEOF_STL_FACET 50
64
+
65
+ static_assert(offsetof(stl_facet, normal) == 0, "stl_facet.normal has correct offset");
66
+ static_assert(offsetof(stl_facet, vertex) == 12, "stl_facet.vertex has correct offset");
67
+ static_assert(offsetof(stl_facet, extra ) == 48, "stl_facet.extra has correct offset");
68
+ static_assert(sizeof(stl_facet) >= SIZEOF_STL_FACET, "size of stl_facet incorrect");
69
+
70
+ typedef enum {binary, ascii, inmemory} stl_type;
71
+
72
+ struct stl_neighbors {
73
+ stl_neighbors() { reset(); }
74
+ void reset() {
75
+ neighbor[0] = -1;
76
+ neighbor[1] = -1;
77
+ neighbor[2] = -1;
78
+ which_vertex_not[0] = -1;
79
+ which_vertex_not[1] = -1;
80
+ which_vertex_not[2] = -1;
81
+ }
82
+ int num_neighbors() const { return 3 - ((this->neighbor[0] == -1) + (this->neighbor[1] == -1) + (this->neighbor[2] == -1)); }
83
+
84
+ // Index of a neighbor facet.
85
+ int neighbor[3];
86
+ // Index of an opposite vertex at the neighbor face.
87
+ char which_vertex_not[3];
88
+ };
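+ // Note on which_vertex_not (as exercised by the assertions in stl_validate()):
+ // values 0-2 index the neighbor's vertex that lies opposite the shared edge when
+ // both facets are consistently oriented; values 3-5 encode the same vertex
+ // (value modulo 3) for a neighbor whose winding is flipped.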
89
+
90
+ struct stl_stats {
91
+ stl_stats() { memset(&header, 0, 81); }
92
+ char header[81];
93
+ stl_type type = (stl_type)0;
94
+ // Should always match the number of facets stored inside stl_file::facet_start.
95
+ uint32_t number_of_facets = 0;
96
+ // Bounding box.
97
+ stl_vertex max = stl_vertex::Zero();
98
+ stl_vertex min = stl_vertex::Zero();
99
+ stl_vertex size = stl_vertex::Zero();
100
+ float bounding_diameter = 0.f;
101
+ float shortest_edge = 0.f;
102
+ // After repair, the volume shall always be positive.
103
+ float volume = -1.f;
104
+ // Number of face edges connected to another face.
105
+ // Don't use this statistic after repair; use connected_facets_1/2/3_edge instead!
106
+ int connected_edges = 0;
107
+ // Faces with >=1, >=2 and 3 edges connected to another face.
108
+ int connected_facets_1_edge = 0;
109
+ int connected_facets_2_edge = 0;
110
+ int connected_facets_3_edge = 0;
111
+ // Faces with 1, 2 and 3 open edges after exact chaining, but before repair.
112
+ int facets_w_1_bad_edge = 0;
113
+ int facets_w_2_bad_edge = 0;
114
+ int facets_w_3_bad_edge = 0;
115
+ // Number of faces read from an STL file.
116
+ int original_num_facets = 0;
117
+ // Number of edges connected one to another by snapping their end vertices.
118
+ int edges_fixed = 0;
119
+ // Number of faces removed because they were degenerated.
120
+ int degenerate_facets = 0;
121
+ // Total number of facets removed: Degenerate faces and unconnected faces.
122
+ int facets_removed = 0;
123
+ // Number of faces added by hole filling.
124
+ int facets_added = 0;
125
+ // Number of faces reversed because of negative volume or because one patch was connected to another patch with incompatible normals.
126
+ int facets_reversed = 0;
127
+ // Number of incompatible edges remaining after the patches were connected together and possibly their normals flipped.
128
+ int backwards_edges = 0;
129
+ // Number of triangles, which were flipped during the fixing process.
130
+ int normals_fixed = 0;
131
+ // Number of connected triangle patches.
132
+ int number_of_parts = 0;
133
+
134
+ void clear() { *this = stl_stats(); }
135
+ };
136
+
137
+ struct stl_file {
138
+ stl_file() {}
139
+
140
+ void clear() {
141
+ this->facet_start.clear();
142
+ this->neighbors_start.clear();
143
+ this->stats.clear();
144
+ }
145
+
146
+ size_t memsize() const {
147
+ return sizeof(*this) + sizeof(stl_facet) * facet_start.size() + sizeof(stl_neighbors) * neighbors_start.size();
148
+ }
149
+
150
+ std::vector<stl_facet> facet_start;
151
+ std::vector<stl_neighbors> neighbors_start;
152
+ // Statistics
153
+ stl_stats stats;
154
+ };
155
+
156
+ struct indexed_triangle_set
157
+ {
158
+ void clear() { indices.clear(); vertices.clear(); }
159
+
160
+ size_t memsize() const {
161
+ return sizeof(*this) + sizeof(stl_triangle_vertex_indices) * indices.size() + sizeof(stl_vertex) * vertices.size();
162
+ }
163
+
164
+ std::vector<stl_triangle_vertex_indices> indices;
165
+ std::vector<stl_vertex> vertices;
166
+
167
+ bool empty() const { return indices.empty() || vertices.empty(); }
168
+ bool operator==(const indexed_triangle_set& other) const { return this->indices == other.indices && this->vertices == other.vertices; }
169
+ };
170
+
171
+ extern bool stl_open(stl_file *stl, const char *file);
172
+ extern void stl_stats_out(stl_file *stl, FILE *file, char *input_file);
173
+ extern bool stl_print_neighbors(stl_file *stl, char *file);
174
+ extern bool stl_write_ascii(stl_file *stl, const char *file, const char *label);
175
+ extern bool stl_write_binary(stl_file *stl, const char *file, const char *label);
176
+ extern void stl_check_facets_exact(stl_file *stl);
177
+ extern void stl_check_facets_nearby(stl_file *stl, float tolerance);
178
+ extern void stl_remove_unconnected_facets(stl_file *stl);
179
+ extern void stl_write_vertex(stl_file *stl, int facet, int vertex);
180
+ extern void stl_write_facet(stl_file *stl, char *label, int facet);
181
+ extern void stl_write_neighbor(stl_file *stl, int facet);
182
+ extern bool stl_write_quad_object(stl_file *stl, char *file);
183
+ extern void stl_verify_neighbors(stl_file *stl);
184
+ extern void stl_fill_holes(stl_file *stl);
185
+ extern void stl_fix_normal_directions(stl_file *stl);
186
+ extern void stl_fix_normal_values(stl_file *stl);
187
+ extern void stl_reverse_all_facets(stl_file *stl);
188
+ extern void stl_translate(stl_file *stl, float x, float y, float z);
189
+ extern void stl_translate_relative(stl_file *stl, float x, float y, float z);
190
+ extern void stl_scale_versor(stl_file *stl, const stl_vertex &versor);
191
+ inline void stl_scale(stl_file *stl, float factor) { stl_scale_versor(stl, stl_vertex(factor, factor, factor)); }
192
+ extern void stl_rotate_x(stl_file *stl, float angle);
193
+ extern void stl_rotate_y(stl_file *stl, float angle);
194
+ extern void stl_rotate_z(stl_file *stl, float angle);
195
+ extern void stl_mirror_xy(stl_file *stl);
196
+ extern void stl_mirror_yz(stl_file *stl);
197
+ extern void stl_mirror_xz(stl_file *stl);
198
+
199
+ extern void stl_get_size(stl_file *stl);
200
+
201
+ // the following function is not used
202
+ /*
203
+ template<typename T>
204
+ extern void stl_transform(stl_file *stl, T *trafo3x4)
205
+ {
206
+ Eigen::Matrix<T, 3, 3, Eigen::DontAlign> trafo3x3;
207
+ for (int i = 0; i < 3; ++i)
208
+ {
209
+ for (int j = 0; j < 3; ++j)
210
+ {
211
+ trafo3x3(i, j) = trafo3x4[(i * 4) + j];
212
+ }
213
+ }
214
+ Eigen::Matrix<T, 3, 3, Eigen::DontAlign> r = trafo3x3.inverse().transpose();
215
+ for (uint32_t i_face = 0; i_face < stl->stats.number_of_facets; ++ i_face) {
216
+ stl_facet &face = stl->facet_start[i_face];
217
+ for (int i_vertex = 0; i_vertex < 3; ++ i_vertex) {
218
+ stl_vertex &v_dst = face.vertex[i_vertex];
219
+ stl_vertex v_src = v_dst;
220
+ v_dst(0) = T(trafo3x4[0] * v_src(0) + trafo3x4[1] * v_src(1) + trafo3x4[2] * v_src(2) + trafo3x4[3]);
221
+ v_dst(1) = T(trafo3x4[4] * v_src(0) + trafo3x4[5] * v_src(1) + trafo3x4[6] * v_src(2) + trafo3x4[7]);
222
+ v_dst(2) = T(trafo3x4[8] * v_src(0) + trafo3x4[9] * v_src(1) + trafo3x4[10] * v_src(2) + trafo3x4[11]);
223
+ }
224
+ face.normal = (r * face.normal.template cast<T>()).template cast<float>().eval();
225
+ }
226
+
227
+ stl_get_size(stl);
228
+ }
229
+ */
230
+
231
+ template<typename T>
232
+ inline void stl_transform(stl_file *stl, const Eigen::Transform<T, 3, Eigen::Affine, Eigen::DontAlign>& t)
233
+ {
234
+ const Eigen::Matrix<T, 3, 3, Eigen::DontAlign> r = t.matrix().template block<3, 3>(0, 0).inverse().transpose();
235
+ for (size_t i = 0; i < stl->stats.number_of_facets; ++ i) {
236
+ stl_facet &f = stl->facet_start[i];
237
+ for (size_t j = 0; j < 3; ++j)
238
+ f.vertex[j] = (t * f.vertex[j].template cast<T>()).template cast<float>().eval();
239
+ f.normal = (r * f.normal.template cast<T>()).template cast<float>().eval();
240
+ }
241
+
242
+ stl_get_size(stl);
243
+ }
244
+
245
+ template<typename T>
246
+ inline void stl_transform(stl_file *stl, const Eigen::Matrix<T, 3, 3, Eigen::DontAlign>& m)
247
+ {
248
+ const Eigen::Matrix<T, 3, 3, Eigen::DontAlign> r = m.inverse().transpose();
249
+ for (size_t i = 0; i < stl->stats.number_of_facets; ++ i) {
250
+ stl_facet &f = stl->facet_start[i];
251
+ for (size_t j = 0; j < 3; ++j)
252
+ f.vertex[j] = (m * f.vertex[j].template cast<T>()).template cast<float>().eval();
253
+ f.normal = (r * f.normal.template cast<T>()).template cast<float>().eval();
254
+ }
255
+
256
+ stl_get_size(stl);
257
+ }
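+ // Usage sketch (transform values are illustrative, "stl" is an already loaded stl_file):
+ // both overloads transform the vertices, rotate the normals by the inverse transpose
+ // of the linear part and rebuild the bounding box via stl_get_size().
+ //   Eigen::Transform<float, 3, Eigen::Affine, Eigen::DontAlign> t;
+ //   t.setIdentity();
+ //   t.translation() = stl_vertex(0.f, 0.f, 10.f);
+ //   stl_transform(&stl, t);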
258
+
259
+ template<typename V>
260
+ inline void its_translate(indexed_triangle_set &its, const V v)
261
+ {
262
+ for (stl_vertex &v_dst : its.vertices)
263
+ v_dst += v;
264
+ }
265
+
266
+ template<typename T>
267
+ inline void its_transform(indexed_triangle_set &its, T *trafo3x4)
268
+ {
269
+ for (stl_vertex &v_dst : its.vertices) {
270
+ stl_vertex v_src = v_dst;
271
+ v_dst(0) = T(trafo3x4[0] * v_src(0) + trafo3x4[1] * v_src(1) + trafo3x4[2] * v_src(2) + trafo3x4[3]);
272
+ v_dst(1) = T(trafo3x4[4] * v_src(0) + trafo3x4[5] * v_src(1) + trafo3x4[6] * v_src(2) + trafo3x4[7]);
273
+ v_dst(2) = T(trafo3x4[8] * v_src(0) + trafo3x4[9] * v_src(1) + trafo3x4[10] * v_src(2) + trafo3x4[11]);
274
+ }
275
+ }
276
+
277
+ template<typename T>
278
+ inline void its_transform(indexed_triangle_set &its, const Eigen::Transform<T, 3, Eigen::Affine, Eigen::DontAlign>& t, bool fix_left_handed = false)
279
+ {
280
+ //const Eigen::Matrix<double, 3, 3, Eigen::DontAlign> r = t.matrix().template block<3, 3>(0, 0);
281
+ for (stl_vertex &v : its.vertices)
282
+ v = (t * v.template cast<T>()).template cast<float>().eval();
283
+ if (fix_left_handed && t.matrix().block(0, 0, 3, 3).determinant() < 0.)
284
+ for (stl_triangle_vertex_indices &i : its.indices)
285
+ std::swap(i[0], i[1]);
286
+ }
287
+
288
+ template<typename T>
289
+ inline void its_transform(indexed_triangle_set &its, const Eigen::Matrix<T, 3, 3, Eigen::DontAlign>& m, bool fix_left_handed = false)
290
+ {
291
+ for (stl_vertex &v : its.vertices)
292
+ v = (m * v.template cast<T>()).template cast<float>().eval();
293
+ if (fix_left_handed && m.determinant() < 0.)
294
+ for (stl_triangle_vertex_indices &i : its.indices)
295
+ std::swap(i[0], i[1]);
296
+ }
297
+
298
+ extern void its_rotate_x(indexed_triangle_set &its, float angle);
299
+ extern void its_rotate_y(indexed_triangle_set &its, float angle);
300
+ extern void its_rotate_z(indexed_triangle_set &its, float angle);
301
+
302
+ extern void stl_generate_shared_vertices(stl_file *stl, indexed_triangle_set &its);
303
+ extern bool its_write_obj(const indexed_triangle_set &its, const char *file);
304
+ extern bool its_write_off(const indexed_triangle_set &its, const char *file);
305
+ extern bool its_write_vrml(const indexed_triangle_set &its, const char *file);
306
+
307
+
308
+ typedef Eigen::Matrix<float, 3, 1, Eigen::DontAlign> obj_color; // Vec3f
309
+ /// <summary>
310
+ /// Write an indexed triangle set into an OBJ file with per-vertex colors.
311
+ /// </summary>
312
+ /// <param name="its">input model</param>
313
+ /// <param name="color">per-vertex colors of the stored model (one entry per vertex)</param>
314
+ /// <param name="file">output file path</param>
315
+ /// <returns>True on success, false otherwise</returns>
316
+ extern bool its_write_obj(const indexed_triangle_set& its, const std::vector<obj_color> &color, const char* file);
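+ // Usage sketch (the output path is a placeholder): the color vector is expected to
+ // hold one RGB triplet per vertex, written alongside each "v" line.
+ //   std::vector<obj_color> colors(its.vertices.size(), obj_color(1.f, 0.f, 0.f));
+ //   its_write_obj(its, colors, "colored_mesh.obj");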
317
+
318
+ extern bool stl_write_dxf(stl_file *stl, const char *file, char *label);
319
+ inline void stl_calculate_normal(stl_normal &normal, stl_facet *facet) {
320
+ normal = (facet->vertex[1] - facet->vertex[0]).cross(facet->vertex[2] - facet->vertex[0]);
321
+ }
322
+ inline void stl_normalize_vector(stl_normal &normal) {
323
+ double length = normal.cast<double>().norm();
324
+ if (length < 0.000000000001)
325
+ normal = stl_normal::Zero();
326
+ else
327
+ normal *= float(1.0 / length);
328
+ }
329
+ extern void stl_calculate_volume(stl_file *stl);
330
+
331
+ extern void stl_repair(stl_file *stl, bool fixall_flag, bool exact_flag, bool tolerance_flag, float tolerance, bool increment_flag, float increment, bool nearby_flag, int iterations, bool remove_unconnected_flag, bool fill_holes_flag, bool normal_directions_flag, bool normal_values_flag, bool reverse_all_flag, bool verbose_flag);
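+ // Usage sketch (flag values are illustrative): a typical "fix everything" call. With
+ // fixall_flag set, an unset tolerance falls back to the shortest edge and an unset
+ // increment to bounding_diameter / 10000.
+ //   stl_repair(&stl,
+ //              true,          // fixall_flag
+ //              false,         // exact_flag
+ //              false, 0.f,    // tolerance_flag, tolerance
+ //              false, 0.f,    // increment_flag, increment
+ //              false,         // nearby_flag
+ //              2,             // iterations
+ //              false, false,  // remove_unconnected_flag, fill_holes_flag
+ //              false, false,  // normal_directions_flag, normal_values_flag
+ //              false,         // reverse_all_flag
+ //              true);         // verbose_flag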
332
+
333
+ extern void stl_allocate(stl_file *stl);
334
+ extern void stl_read(stl_file *stl, int first_facet, bool first);
335
+ extern void stl_facet_stats(stl_file *stl, stl_facet facet, bool &first);
336
+ extern void stl_reallocate(stl_file *stl);
337
+ extern void stl_add_facet(stl_file *stl, const stl_facet *new_facet);
338
+
339
+ // Validate the mesh, assert on error.
340
+ extern bool stl_validate(const stl_file *stl);
341
+ extern bool stl_validate(const stl_file *stl, const indexed_triangle_set &its);
342
+
343
+ #endif
data/bundled_deps/admesh/admesh/stl_io.cpp ADDED
@@ -0,0 +1,251 @@
1
+ /* ADMesh -- process triangulated solid meshes
2
+ * Copyright (C) 1995, 1996 Anthony D. Martin <[email protected]>
3
+ * Copyright (C) 2013, 2014 several contributors, see AUTHORS
4
+ *
5
+ * This program is free software; you can redistribute it and/or modify
6
+ * it under the terms of the GNU General Public License as published by
7
+ * the Free Software Foundation; either version 2 of the License, or
8
+ * (at your option) any later version.
9
+
10
+ * This program is distributed in the hope that it will be useful,
11
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ * GNU General Public License for more details.
14
+
15
+ * You should have received a copy of the GNU General Public License along
16
+ * with this program; if not, write to the Free Software Foundation, Inc.,
17
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
+ *
19
+ * Questions, comments, suggestions, etc to
20
+ * https://github.com/admesh/admesh/issues
21
+ */
22
+
23
+ #include <stdlib.h>
24
+ #include <string.h>
25
+
26
+ #include <boost/log/trivial.hpp>
27
+ #include <boost/nowide/cstdio.hpp>
28
+ #include <boost/predef/other/endian.h>
29
+
30
+ #include "stl.h"
31
+
32
+ void stl_stats_out(stl_file *stl, FILE *file, char *input_file)
33
+ {
34
+ // This is here for Slic3r, without our config.h it won't use this part of the code anyway.
35
+ #ifndef VERSION
36
+ #define VERSION "unknown"
37
+ #endif
38
+ fprintf(file, "\n================= Results produced by ADMesh version " VERSION " ================\n");
39
+ fprintf(file, "Input file : %s\n", input_file);
40
+ if (stl->stats.type == binary)
41
+ fprintf(file, "File type : Binary STL file\n");
42
+ else
43
+ fprintf(file, "File type : ASCII STL file\n");
44
+ fprintf(file, "Header : %s\n", stl->stats.header);
45
+ fprintf(file, "============== Size ==============\n");
46
+ fprintf(file, "Min X = % f, Max X = % f\n", stl->stats.min(0), stl->stats.max(0));
47
+ fprintf(file, "Min Y = % f, Max Y = % f\n", stl->stats.min(1), stl->stats.max(1));
48
+ fprintf(file, "Min Z = % f, Max Z = % f\n", stl->stats.min(2), stl->stats.max(2));
49
+ fprintf(file, "========= Facet Status ========== Original ============ Final ====\n");
50
+ fprintf(file, "Number of facets : %5d %5d\n", stl->stats.original_num_facets, stl->stats.number_of_facets);
51
+ fprintf(file, "Facets with 1 disconnected edge : %5d %5d\n",
52
+ stl->stats.facets_w_1_bad_edge, stl->stats.connected_facets_2_edge - stl->stats.connected_facets_3_edge);
53
+ fprintf(file, "Facets with 2 disconnected edges : %5d %5d\n",
54
+ stl->stats.facets_w_2_bad_edge, stl->stats.connected_facets_1_edge - stl->stats.connected_facets_2_edge);
55
+ fprintf(file, "Facets with 3 disconnected edges : %5d %5d\n",
56
+ stl->stats.facets_w_3_bad_edge, stl->stats.number_of_facets - stl->stats.connected_facets_1_edge);
57
+ fprintf(file, "Total disconnected facets : %5d %5d\n",
58
+ stl->stats.facets_w_1_bad_edge + stl->stats.facets_w_2_bad_edge + stl->stats.facets_w_3_bad_edge, stl->stats.number_of_facets - stl->stats.connected_facets_3_edge);
59
+ fprintf(file, "=== Processing Statistics === ===== Other Statistics =====\n");
60
+ fprintf(file, "Number of parts : %5d Volume : %f\n", stl->stats.number_of_parts, stl->stats.volume);
61
+ fprintf(file, "Degenerate facets : %5d\n", stl->stats.degenerate_facets);
62
+ fprintf(file, "Edges fixed : %5d\n", stl->stats.edges_fixed);
63
+ fprintf(file, "Facets removed : %5d\n", stl->stats.facets_removed);
64
+ fprintf(file, "Facets added : %5d\n", stl->stats.facets_added);
65
+ fprintf(file, "Facets reversed : %5d\n", stl->stats.facets_reversed);
66
+ fprintf(file, "Backwards edges : %5d\n", stl->stats.backwards_edges);
67
+ fprintf(file, "Normals fixed : %5d\n", stl->stats.normals_fixed);
68
+ }
69
+
70
+ bool stl_write_ascii(stl_file *stl, const char *file, const char *label)
71
+ {
72
+ FILE *fp = boost::nowide::fopen(file, "w");
73
+ if (fp == nullptr) {
74
+ BOOST_LOG_TRIVIAL(error) << "stl_write_ascii: Couldn't open " << file << " for writing";
75
+ return false;
76
+ }
77
+
78
+ fprintf(fp, "solid %s\n", label);
79
+
80
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
81
+ fprintf(fp, " facet normal % .8E % .8E % .8E\n", stl->facet_start[i].normal(0), stl->facet_start[i].normal(1), stl->facet_start[i].normal(2));
82
+ fprintf(fp, " outer loop\n");
83
+ fprintf(fp, " vertex % .8E % .8E % .8E\n", stl->facet_start[i].vertex[0](0), stl->facet_start[i].vertex[0](1), stl->facet_start[i].vertex[0](2));
84
+ fprintf(fp, " vertex % .8E % .8E % .8E\n", stl->facet_start[i].vertex[1](0), stl->facet_start[i].vertex[1](1), stl->facet_start[i].vertex[1](2));
85
+ fprintf(fp, " vertex % .8E % .8E % .8E\n", stl->facet_start[i].vertex[2](0), stl->facet_start[i].vertex[2](1), stl->facet_start[i].vertex[2](2));
86
+ fprintf(fp, " endloop\n");
87
+ fprintf(fp, " endfacet\n");
88
+ }
89
+
90
+ fprintf(fp, "endsolid %s\n", label);
91
+ fclose(fp);
92
+ return true;
93
+ }
94
+
95
+ bool stl_print_neighbors(stl_file *stl, char *file)
96
+ {
97
+ FILE *fp = boost::nowide::fopen(file, "w");
98
+ if (fp == nullptr) {
99
+ BOOST_LOG_TRIVIAL(error) << "stl_print_neighbors: Couldn't open " << file << " for writing";
100
+ return false;
101
+ }
102
+
103
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
104
+ fprintf(fp, "%d, %d,%d, %d,%d, %d,%d\n",
105
+ i,
106
+ stl->neighbors_start[i].neighbor[0],
107
+ (int)stl->neighbors_start[i].which_vertex_not[0],
108
+ stl->neighbors_start[i].neighbor[1],
109
+ (int)stl->neighbors_start[i].which_vertex_not[1],
110
+ stl->neighbors_start[i].neighbor[2],
111
+ (int)stl->neighbors_start[i].which_vertex_not[2]);
112
+ }
113
+ fclose(fp);
114
+ return true;
115
+ }
116
+
117
+ #if BOOST_ENDIAN_BIG_BYTE
118
+ // Swap a buffer of 32bit data from little endian to big endian and vice versa.
119
+ void stl_internal_reverse_quads(char *buf, size_t cnt)
120
+ {
121
+ for (size_t i = 0; i < cnt; i += 4) {
122
+ std::swap(buf[i], buf[i+3]);
123
+ std::swap(buf[i+1], buf[i+2]);
124
+ }
125
+ }
126
+ #endif
127
+
128
+ bool stl_write_binary(stl_file *stl, const char *file, const char *label)
129
+ {
130
+ FILE *fp = boost::nowide::fopen(file, "wb");
131
+ if (fp == nullptr) {
132
+ BOOST_LOG_TRIVIAL(error) << "stl_write_binary: Couldn't open " << file << " for writing";
133
+ return false;
134
+ }
135
+
136
+ fprintf(fp, "%s", label);
137
+ for (size_t i = strlen(label); i < LABEL_SIZE; ++ i)
138
+ putc(0, fp);
139
+
140
+ #if !defined(SEEK_SET)
141
+ #define SEEK_SET 0
142
+ #endif
143
+ fseek(fp, LABEL_SIZE, SEEK_SET);
144
+ #if BOOST_ENDIAN_LITTLE_BYTE
145
+ fwrite(&stl->stats.number_of_facets, 4, 1, fp);
146
+ for (const stl_facet &facet : stl->facet_start)
147
+ fwrite(&facet, SIZEOF_STL_FACET, 1, fp);
148
+ #else /* BOOST_ENDIAN_LITTLE_BYTE */
149
+ char buffer[50];
150
+ // Convert the number of facets to little endian.
151
+ memcpy(buffer, &stl->stats.number_of_facets, 4);
152
+ stl_internal_reverse_quads(buffer, 4);
153
+ fwrite(buffer, 4, 1, fp);
154
+ for (const stl_facet &facet : stl->facet_start) {
155
+ memcpy(buffer, &facet, 50);
156
+ // Convert to little endian.
157
+ stl_internal_reverse_quads(buffer, 48);
158
+ fwrite(buffer, SIZEOF_STL_FACET, 1, fp);
159
+ }
160
+ #endif /* BOOST_ENDIAN_LITTLE_BYTE */
161
+ fclose(fp);
162
+ return true;
163
+ }
164
+
165
+ void stl_write_vertex(stl_file *stl, int facet, int vertex)
166
+ {
167
+ printf(" vertex %d/%d % .8E % .8E % .8E\n", vertex, facet,
168
+ stl->facet_start[facet].vertex[vertex](0),
169
+ stl->facet_start[facet].vertex[vertex](1),
170
+ stl->facet_start[facet].vertex[vertex](2));
171
+ }
172
+
173
+ void stl_write_facet(stl_file *stl, char *label, int facet)
174
+ {
175
+ printf("facet (%d)/ %s\n", facet, label);
176
+ stl_write_vertex(stl, facet, 0);
177
+ stl_write_vertex(stl, facet, 1);
178
+ stl_write_vertex(stl, facet, 2);
179
+ }
180
+
181
+ void stl_write_neighbor(stl_file *stl, int facet)
182
+ {
183
+ printf("Neighbors %d: %d, %d, %d ; %d, %d, %d\n", facet,
184
+ stl->neighbors_start[facet].neighbor[0],
185
+ stl->neighbors_start[facet].neighbor[1],
186
+ stl->neighbors_start[facet].neighbor[2],
187
+ stl->neighbors_start[facet].which_vertex_not[0],
188
+ stl->neighbors_start[facet].which_vertex_not[1],
189
+ stl->neighbors_start[facet].which_vertex_not[2]);
190
+ }
191
+
192
+ bool stl_write_quad_object(stl_file *stl, char *file)
193
+ {
194
+ stl_vertex connect_color = stl_vertex::Zero();
195
+ stl_vertex uncon_1_color = stl_vertex::Zero();
196
+ stl_vertex uncon_2_color = stl_vertex::Zero();
197
+ stl_vertex uncon_3_color = stl_vertex::Zero();
198
+ stl_vertex color;
199
+
200
+ FILE *fp = boost::nowide::fopen(file, "w");
201
+ if (fp == nullptr) {
202
+ BOOST_LOG_TRIVIAL(error) << "stl_write_quad_object: Couldn't open " << file << " for writing";
203
+ return false;
204
+ }
205
+
206
+ fprintf(fp, "CQUAD\n");
207
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
208
+ switch (stl->neighbors_start[i].num_neighbors()) {
209
+ case 0:
210
+ default: color = uncon_3_color; break;
211
+ case 1: color = uncon_2_color; break;
212
+ case 2: color = uncon_1_color; break;
213
+ case 3: color = connect_color; break;
214
+ }
215
+ fprintf(fp, "%f %f %f %1.1f %1.1f %1.1f 1\n", stl->facet_start[i].vertex[0](0), stl->facet_start[i].vertex[0](1), stl->facet_start[i].vertex[0](2), color(0), color(1), color(2));
216
+ fprintf(fp, "%f %f %f %1.1f %1.1f %1.1f 1\n", stl->facet_start[i].vertex[1](0), stl->facet_start[i].vertex[1](1), stl->facet_start[i].vertex[1](2), color(0), color(1), color(2));
217
+ fprintf(fp, "%f %f %f %1.1f %1.1f %1.1f 1\n", stl->facet_start[i].vertex[2](0), stl->facet_start[i].vertex[2](1), stl->facet_start[i].vertex[2](2), color(0), color(1), color(2));
218
+ fprintf(fp, "%f %f %f %1.1f %1.1f %1.1f 1\n", stl->facet_start[i].vertex[2](0), stl->facet_start[i].vertex[2](1), stl->facet_start[i].vertex[2](2), color(0), color(1), color(2));
219
+ }
220
+ fclose(fp);
221
+ return true;
222
+ }
223
+
224
+ bool stl_write_dxf(stl_file *stl, const char *file, char *label)
225
+ {
226
+ FILE *fp = boost::nowide::fopen(file, "w");
227
+ if (fp == nullptr) {
228
+ BOOST_LOG_TRIVIAL(error) << "stl_write_quad_object: Couldn't open " << file << " for writing";
229
+ return false;
230
+ }
231
+
232
+ fprintf(fp, "999\n%s\n", label);
233
+ fprintf(fp, "0\nSECTION\n2\nHEADER\n0\nENDSEC\n");
234
+ fprintf(fp, "0\nSECTION\n2\nTABLES\n0\nTABLE\n2\nLAYER\n70\n1\n\
235
+ 0\nLAYER\n2\n0\n70\n0\n62\n7\n6\nCONTINUOUS\n0\nENDTAB\n0\nENDSEC\n");
236
+ fprintf(fp, "0\nSECTION\n2\nBLOCKS\n0\nENDSEC\n");
237
+
238
+ fprintf(fp, "0\nSECTION\n2\nENTITIES\n");
239
+
240
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
241
+ fprintf(fp, "0\n3DFACE\n8\n0\n");
242
+ fprintf(fp, "10\n%f\n20\n%f\n30\n%f\n", stl->facet_start[i].vertex[0](0), stl->facet_start[i].vertex[0](1), stl->facet_start[i].vertex[0](2));
243
+ fprintf(fp, "11\n%f\n21\n%f\n31\n%f\n", stl->facet_start[i].vertex[1](0), stl->facet_start[i].vertex[1](1), stl->facet_start[i].vertex[1](2));
244
+ fprintf(fp, "12\n%f\n22\n%f\n32\n%f\n", stl->facet_start[i].vertex[2](0), stl->facet_start[i].vertex[2](1), stl->facet_start[i].vertex[2](2));
245
+ fprintf(fp, "13\n%f\n23\n%f\n33\n%f\n", stl->facet_start[i].vertex[2](0), stl->facet_start[i].vertex[2](1), stl->facet_start[i].vertex[2](2));
246
+ }
247
+
248
+ fprintf(fp, "0\nENDSEC\n0\nEOF\n");
249
+ fclose(fp);
250
+ return true;
251
+ }
data/bundled_deps/admesh/admesh/stlinit.cpp ADDED
@@ -0,0 +1,281 @@
1
+ /* ADMesh -- process triangulated solid meshes
2
+ * Copyright (C) 1995, 1996 Anthony D. Martin <[email protected]>
3
+ * Copyright (C) 2013, 2014 several contributors, see AUTHORS
4
+ *
5
+ * This program is free software; you can redistribute it and/or modify
6
+ * it under the terms of the GNU General Public License as published by
7
+ * the Free Software Foundation; either version 2 of the License, or
8
+ * (at your option) any later version.
9
+
10
+ * This program is distributed in the hope that it will be useful,
11
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ * GNU General Public License for more details.
14
+
15
+ * You should have received a copy of the GNU General Public License along
16
+ * with this program; if not, write to the Free Software Foundation, Inc.,
17
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
+ *
19
+ * Questions, comments, suggestions, etc to
20
+ * https://github.com/admesh/admesh/issues
21
+ */
22
+
23
+ #include <stdio.h>
24
+ #include <stdlib.h>
25
+ #include <string.h>
26
+ #include <math.h>
27
+ #include <assert.h>
28
+
29
+ #include <boost/log/trivial.hpp>
30
+ #include <boost/nowide/cstdio.hpp>
31
+ #include <boost/predef/other/endian.h>
32
+
33
+ #include "stl.h"
34
+
35
+ #include <LocalesUtils.hpp>
36
+
37
+ #ifndef SEEK_SET
38
+ #error "SEEK_SET not defined"
39
+ #endif
40
+
41
+ #if BOOST_ENDIAN_BIG_BYTE
42
+ extern void stl_internal_reverse_quads(char *buf, size_t cnt);
43
+ #endif /* BOOST_ENDIAN_BIG_BYTE */
44
+
45
+ static FILE* stl_open_count_facets(stl_file *stl, const char *file)
46
+ {
47
+ // Open the file in binary mode first.
48
+ FILE *fp = boost::nowide::fopen(file, "rb");
49
+ if (fp == nullptr) {
50
+ BOOST_LOG_TRIVIAL(error) << "stl_open_count_facets: Couldn't open " << file << " for reading";
51
+ return nullptr;
52
+ }
53
+ // Find size of file.
54
+ fseek(fp, 0, SEEK_END);
55
+ long file_size = ftell(fp);
56
+
57
+ // Check for binary or ASCII file.
58
+ fseek(fp, HEADER_SIZE, SEEK_SET);
59
+ unsigned char chtest[128];
60
+ if (! fread(chtest, sizeof(chtest), 1, fp)) {
61
+ BOOST_LOG_TRIVIAL(error) << "stl_open_count_facets: The input is an empty file: " << file;
62
+ fclose(fp);
63
+ return nullptr;
64
+ }
65
+ stl->stats.type = ascii;
66
+ for (size_t s = 0; s < sizeof(chtest); s++) {
67
+ if (chtest[s] > 127) {
68
+ stl->stats.type = binary;
69
+ break;
70
+ }
71
+ }
72
+ rewind(fp);
73
+
74
+ uint32_t num_facets = 0;
75
+
76
+ // Get the header and the number of facets in the .STL file.
77
+ // If the .STL file is binary, then do the following:
78
+ if (stl->stats.type == binary) {
79
+ // Test if the STL file has the right size.
80
+ if (((file_size - HEADER_SIZE) % SIZEOF_STL_FACET != 0) || (file_size < STL_MIN_FILE_SIZE)) {
81
+ BOOST_LOG_TRIVIAL(error) << "stl_open_count_facets: The file " << file << " has the wrong size.";
82
+ fclose(fp);
83
+ return nullptr;
84
+ }
85
+ num_facets = (file_size - HEADER_SIZE) / SIZEOF_STL_FACET;
86
+
87
+ // Read the header.
88
+ if (fread(stl->stats.header, LABEL_SIZE, 1, fp) > 79)
89
+ stl->stats.header[80] = '\0';
90
+
91
+ // Read the int following the header. This should contain # of facets.
92
+ uint32_t header_num_facets;
93
+ bool header_num_faces_read = fread(&header_num_facets, sizeof(uint32_t), 1, fp) != 0;
94
+ #if BOOST_ENDIAN_BIG_BYTE
95
+ // Convert from little endian to big endian.
96
+ stl_internal_reverse_quads((char*)&header_num_facets, 4);
97
+ #endif /* BOOST_ENDIAN_BIG_BYTE */
98
+ if (! header_num_faces_read || num_facets != header_num_facets)
99
+ BOOST_LOG_TRIVIAL(info) << "stl_open_count_facets: Warning: File size doesn't match number of facets in the header: " << file;
100
+ }
101
+ // Otherwise, if the .STL file is ASCII, then do the following:
102
+ else
103
+ {
104
+ // Reopen the file in text mode (for getting correct newlines on Windows)
105
+ // fix to silence a warning about unused return value.
106
+ // obviously if it fails we have problems....
107
+ fp = boost::nowide::freopen(file, "r", fp);
108
+
109
+ // do another null check to be safe
110
+ if (fp == nullptr) {
111
+ BOOST_LOG_TRIVIAL(error) << "stl_open_count_facets: Couldn't open " << file << " for reading";
112
+ // Nothing to fclose() here: freopen() has already closed the original stream even on failure.
113
+ return nullptr;
114
+ }
115
+
116
+ // Find the number of facets.
117
+ char linebuf[100];
118
+ int num_lines = 1;
119
+ while (fgets(linebuf, 100, fp) != nullptr) {
120
+ // Don't count short lines.
121
+ if (strlen(linebuf) <= 4)
122
+ continue;
123
+ // Skip solid/endsolid lines as broken STL file generators may put several of them.
124
+ if (strncmp(linebuf, "solid", 5) == 0 || strncmp(linebuf, "endsolid", 8) == 0)
125
+ continue;
126
+ ++ num_lines;
127
+ }
128
+
129
+ rewind(fp);
130
+
131
+ // Get the header.
132
+ int i = 0;
133
+ for (; i < 80 && (stl->stats.header[i] = getc(fp)) != '\n'; ++ i) ;
134
+ stl->stats.header[i] = '\0'; // Lose the '\n'
135
+ stl->stats.header[80] = '\0';
136
+
137
+ num_facets = num_lines / ASCII_LINES_PER_FACET;
138
+ }
139
+
140
+ stl->stats.number_of_facets += num_facets;
141
+ stl->stats.original_num_facets = stl->stats.number_of_facets;
142
+ return fp;
143
+ }
144
+
145
+ /* Reads the contents of the file pointed to by fp into the stl structure,
146
+ starting at facet first_facet. The second argument says if it's our first
147
+ time running this for the stl and therefore we should reset our max and min stats. */
148
+ static bool stl_read(stl_file *stl, FILE *fp, int first_facet, bool first)
149
+ {
150
+ if (stl->stats.type == binary)
151
+ fseek(fp, HEADER_SIZE, SEEK_SET);
152
+ else
153
+ rewind(fp);
154
+
155
+ char normal_buf[3][32];
156
+ for (uint32_t i = first_facet; i < stl->stats.number_of_facets; ++ i) {
157
+ stl_facet facet;
158
+
159
+ if (stl->stats.type == binary) {
160
+ // Read a single facet from a binary .STL file. We assume little-endian architecture!
161
+ if (fread(&facet, 1, SIZEOF_STL_FACET, fp) != SIZEOF_STL_FACET)
162
+ return false;
163
+ #if BOOST_ENDIAN_BIG_BYTE
164
+ // Convert the loaded little endian data to big endian.
165
+ stl_internal_reverse_quads((char*)&facet, 48);
166
+ #endif /* BOOST_ENDIAN_BIG_BYTE */
167
+ } else {
168
+ // Read a single facet from an ASCII .STL file
169
+ // skip solid/endsolid
170
+ // (in this order, otherwise it won't work when they are paired in the middle of a file)
171
+ fscanf(fp, " endsolid%*[^\n]\n");
172
+ fscanf(fp, " solid%*[^\n]\n"); // name might contain spaces so %*s doesn't work and it also can be empty (just "solid")
173
+ // Leading space in the fscanf format skips all leading white spaces including numerous new lines and tabs.
174
+ int res_normal = fscanf(fp, " facet normal %31s %31s %31s", normal_buf[0], normal_buf[1], normal_buf[2]);
175
+ assert(res_normal == 3);
176
+ int res_outer_loop = fscanf(fp, " outer loop");
177
+ assert(res_outer_loop == 0);
178
+ int res_vertex1 = fscanf(fp, " vertex %f %f %f", &facet.vertex[0](0), &facet.vertex[0](1), &facet.vertex[0](2));
179
+ assert(res_vertex1 == 3);
180
+ int res_vertex2 = fscanf(fp, " vertex %f %f %f", &facet.vertex[1](0), &facet.vertex[1](1), &facet.vertex[1](2));
181
+ assert(res_vertex2 == 3);
182
+ // Trailing whitespace is there to eat all whitespaces and empty lines up to the next non-whitespace.
183
+ int res_vertex3 = fscanf(fp, " vertex %f %f %f ", &facet.vertex[2](0), &facet.vertex[2](1), &facet.vertex[2](2));
184
+ assert(res_vertex3 == 3);
185
+ // Some G-code generators tend to produce text after "endloop" and "endfacet". Just ignore it.
186
+ char buf[2048];
187
+ fgets(buf, 2047, fp);
188
+ bool endloop_ok = strncmp(buf, "endloop", 7) == 0 && (buf[7] == '\r' || buf[7] == '\n' || buf[7] == ' ' || buf[7] == '\t');
189
+ assert(endloop_ok);
190
+ // Skip the trailing whitespaces and empty lines.
191
+ fscanf(fp, " ");
192
+ fgets(buf, 2047, fp);
193
+ bool endfacet_ok = strncmp(buf, "endfacet", 8) == 0 && (buf[8] == '\r' || buf[8] == '\n' || buf[8] == ' ' || buf[8] == '\t');
194
+ assert(endfacet_ok);
195
+ if (res_normal != 3 || res_outer_loop != 0 || res_vertex1 != 3 || res_vertex2 != 3 || res_vertex3 != 3 || ! endloop_ok || ! endfacet_ok) {
196
+ BOOST_LOG_TRIVIAL(error) << "Something is syntactically very wrong with this ASCII STL! ";
197
+ return false;
198
+ }
199
+
200
+ // The facet normal has been parsed as a single string as a workaround for NaN ("not a number") values in the normal definition.
201
+ if (sscanf(normal_buf[0], "%f", &facet.normal(0)) != 1 ||
202
+ sscanf(normal_buf[1], "%f", &facet.normal(1)) != 1 ||
203
+ sscanf(normal_buf[2], "%f", &facet.normal(2)) != 1) {
204
+ // Normal was mangled. Maybe denormals or "not a number" were stored?
205
+ // Just reset the normal and silently ignore it.
206
+ memset(&facet.normal, 0, sizeof(facet.normal));
207
+ }
208
+ }
209
+
210
+ #if 0
211
+ // Report close to zero vertex coordinates. Due to the nature of the floating point numbers,
212
+ // close to zero values may be represented with significantly higher precision than the rest of the vertices.
213
+ // It may be worth rounding these numbers to zero during loading to reduce the number of errors reported
214
+ // during the STL import.
215
+ for (size_t j = 0; j < 3; ++ j) {
216
+ if (facet.vertex[j](0) > -1e-12f && facet.vertex[j](0) < 1e-12f)
217
+ printf("stl_read: facet %d(0) = %e\r\n", j, facet.vertex[j](0));
218
+ if (facet.vertex[j](1) > -1e-12f && facet.vertex[j](1) < 1e-12f)
219
+ printf("stl_read: facet %d(1) = %e\r\n", j, facet.vertex[j](1));
220
+ if (facet.vertex[j](2) > -1e-12f && facet.vertex[j](2) < 1e-12f)
221
+ printf("stl_read: facet %d(2) = %e\r\n", j, facet.vertex[j](2));
222
+ }
223
+ #endif
224
+
225
+ // Write the facet into memory.
226
+ stl->facet_start[i] = facet;
227
+ stl_facet_stats(stl, facet, first);
228
+ }
229
+
230
+ stl->stats.size = stl->stats.max - stl->stats.min;
231
+ stl->stats.bounding_diameter = stl->stats.size.norm();
232
+ return true;
233
+ }
234
+
235
+ bool stl_open(stl_file *stl, const char *file)
236
+ {
237
+ Slic3r::CNumericLocalesSetter locales_setter;
238
+ stl->clear();
239
+ FILE *fp = stl_open_count_facets(stl, file);
240
+ if (fp == nullptr)
241
+ return false;
242
+ stl_allocate(stl);
243
+ bool result = stl_read(stl, fp, 0, true);
244
+ fclose(fp);
245
+ return result;
246
+ }
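+ // Usage sketch (the paths are placeholders): stl_open() detects ASCII vs. binary,
+ // counts the facets, allocates the buffers and reads the whole file in one pass.
+ //   stl_file stl;
+ //   if (stl_open(&stl, "model.stl"))
+ //       stl_write_ascii(&stl, "model_copy.stl", "model");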
247
+
248
+ void stl_allocate(stl_file *stl)
249
+ {
250
+ // Allocate memory for the entire .STL file.
251
+ stl->facet_start.assign(stl->stats.number_of_facets, stl_facet());
252
+ // Allocate memory for the neighbors list.
253
+ stl->neighbors_start.assign(stl->stats.number_of_facets, stl_neighbors());
254
+ }
255
+
256
+ void stl_reallocate(stl_file *stl)
257
+ {
258
+ stl->facet_start.resize(stl->stats.number_of_facets);
259
+ stl->neighbors_start.resize(stl->stats.number_of_facets);
260
+ }
261
+
262
+ void stl_facet_stats(stl_file *stl, stl_facet facet, bool &first)
263
+ {
264
+ // While we are going through all of the facets, let's find the
265
+ // maximum and minimum values for x, y, and z
266
+
267
+ if (first) {
268
+ // Initialize the max and min values the first time through
269
+ stl->stats.min = facet.vertex[0];
270
+ stl->stats.max = facet.vertex[0];
271
+ stl_vertex diff = (facet.vertex[1] - facet.vertex[0]).cwiseAbs();
272
+ stl->stats.shortest_edge = std::max(diff(0), std::max(diff(1), diff(2)));
273
+ first = false;
274
+ }
275
+
276
+ // Now find the max and min values.
277
+ for (size_t i = 0; i < 3; ++ i) {
278
+ stl->stats.min = stl->stats.min.cwiseMin(facet.vertex[i]);
279
+ stl->stats.max = stl->stats.max.cwiseMax(facet.vertex[i]);
280
+ }
281
+ }
data/bundled_deps/admesh/admesh/util.cpp ADDED
@@ -0,0 +1,399 @@
1
+ /* ADMesh -- process triangulated solid meshes
2
+ * Copyright (C) 1995, 1996 Anthony D. Martin <[email protected]>
3
+ * Copyright (C) 2013, 2014 several contributors, see AUTHORS
4
+ *
5
+ * This program is free software; you can redistribute it and/or modify
6
+ * it under the terms of the GNU General Public License as published by
7
+ * the Free Software Foundation; either version 2 of the License, or
8
+ * (at your option) any later version.
9
+
10
+ * This program is distributed in the hope that it will be useful,
11
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ * GNU General Public License for more details.
14
+
15
+ * You should have received a copy of the GNU General Public License along
16
+ * with this program; if not, write to the Free Software Foundation, Inc.,
17
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
+ *
19
+ * Questions, comments, suggestions, etc to
20
+ * https://github.com/admesh/admesh/issues
21
+ */
22
+
23
+ #include <stdio.h>
24
+ #include <stdlib.h>
25
+ #include <string.h>
26
+ #include <math.h>
27
+
28
+ #include <boost/log/trivial.hpp>
29
+
30
+ #include "stl.h"
31
+
32
+ void stl_verify_neighbors(stl_file *stl)
33
+ {
34
+ stl->stats.backwards_edges = 0;
35
+
36
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
37
+ for (int j = 0; j < 3; ++ j) {
38
+ struct stl_edge {
39
+ stl_vertex p1;
40
+ stl_vertex p2;
41
+ int facet_number;
42
+ };
43
+ stl_edge edge_a;
44
+ edge_a.p1 = stl->facet_start[i].vertex[j];
45
+ edge_a.p2 = stl->facet_start[i].vertex[(j + 1) % 3];
46
+ int neighbor = stl->neighbors_start[i].neighbor[j];
47
+ if (neighbor == -1)
48
+ continue; // this edge has no neighbor... Continue.
49
+ int vnot = stl->neighbors_start[i].which_vertex_not[j];
50
+ stl_edge edge_b;
51
+ if (vnot < 3) {
52
+ edge_b.p1 = stl->facet_start[neighbor].vertex[(vnot + 2) % 3];
53
+ edge_b.p2 = stl->facet_start[neighbor].vertex[(vnot + 1) % 3];
54
+ } else {
55
+ stl->stats.backwards_edges += 1;
56
+ edge_b.p1 = stl->facet_start[neighbor].vertex[(vnot + 1) % 3];
57
+ edge_b.p2 = stl->facet_start[neighbor].vertex[(vnot + 2) % 3];
58
+ }
59
+ if (edge_a.p1 != edge_b.p1 || edge_a.p2 != edge_b.p2) {
60
+ // These edges should match but they don't. Print results.
61
+ BOOST_LOG_TRIVIAL(info) << "edge " << j << " of facet " << i << " doesn't match edge " << (vnot + 1) << " of facet " << neighbor;
62
+ stl_write_facet(stl, (char*)"first facet", i);
63
+ stl_write_facet(stl, (char*)"second facet", neighbor);
64
+ }
65
+ }
66
+ }
67
+ }
68
+
69
+ void stl_translate(stl_file *stl, float x, float y, float z)
70
+ {
71
+ stl_vertex new_min(x, y, z);
72
+ stl_vertex shift = new_min - stl->stats.min;
73
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
74
+ for (int j = 0; j < 3; ++ j)
75
+ stl->facet_start[i].vertex[j] += shift;
76
+ stl->stats.min = new_min;
77
+ stl->stats.max += shift;
78
+ }
79
+
80
+ /* Translates the stl by x,y,z, relatively from wherever it is currently */
81
+ void stl_translate_relative(stl_file *stl, float x, float y, float z)
82
+ {
83
+ stl_vertex shift(x, y, z);
84
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
85
+ for (int j = 0; j < 3; ++ j)
86
+ stl->facet_start[i].vertex[j] += shift;
87
+ stl->stats.min += shift;
88
+ stl->stats.max += shift;
89
+ }
90
+
91
+ void stl_scale_versor(stl_file *stl, const stl_vertex &versor)
92
+ {
93
+ // Scale extents.
94
+ auto s = versor.array();
95
+ stl->stats.min.array() *= s;
96
+ stl->stats.max.array() *= s;
97
+ // Scale size.
98
+ stl->stats.size.array() *= s;
99
+ // Scale volume.
100
+ if (stl->stats.volume > 0.0)
101
+ stl->stats.volume *= versor(0) * versor(1) * versor(2);
102
+ // Scale the mesh.
103
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
104
+ for (int j = 0; j < 3; ++ j)
105
+ stl->facet_start[i].vertex[j].array() *= s;
106
+ }
107
+
108
+ static void calculate_normals(stl_file *stl)
109
+ {
110
+ stl_normal normal;
111
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
112
+ stl_calculate_normal(normal, &stl->facet_start[i]);
113
+ stl_normalize_vector(normal);
114
+ stl->facet_start[i].normal = normal;
115
+ }
116
+ }
117
+
118
+ static inline void rotate_point_2d(float &x, float &y, const double c, const double s)
119
+ {
120
+ double xold = x;
121
+ double yold = y;
122
+ x = float(c * xold - s * yold);
123
+ y = float(s * xold + c * yold);
124
+ }
125
+
126
+ void stl_rotate_x(stl_file *stl, float angle)
127
+ {
128
+ double radian_angle = (angle / 180.0) * M_PI;
129
+ double c = cos(radian_angle);
130
+ double s = sin(radian_angle);
131
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
132
+ for (int j = 0; j < 3; ++ j)
133
+ rotate_point_2d(stl->facet_start[i].vertex[j](1), stl->facet_start[i].vertex[j](2), c, s);
134
+ stl_get_size(stl);
135
+ calculate_normals(stl);
136
+ }
137
+
138
+ void stl_rotate_y(stl_file *stl, float angle)
139
+ {
140
+ double radian_angle = (angle / 180.0) * M_PI;
141
+ double c = cos(radian_angle);
142
+ double s = sin(radian_angle);
143
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
144
+ for (int j = 0; j < 3; ++ j)
145
+ rotate_point_2d(stl->facet_start[i].vertex[j](2), stl->facet_start[i].vertex[j](0), c, s);
146
+ stl_get_size(stl);
147
+ calculate_normals(stl);
148
+ }
149
+
150
+ void stl_rotate_z(stl_file *stl, float angle)
151
+ {
152
+ double radian_angle = (angle / 180.0) * M_PI;
153
+ double c = cos(radian_angle);
154
+ double s = sin(radian_angle);
155
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
156
+ for (int j = 0; j < 3; ++ j)
157
+ rotate_point_2d(stl->facet_start[i].vertex[j](0), stl->facet_start[i].vertex[j](1), c, s);
158
+ stl_get_size(stl);
159
+ calculate_normals(stl);
160
+ }
161
+
162
+ void its_rotate_x(indexed_triangle_set &its, float angle)
163
+ {
164
+ double radian_angle = (angle / 180.0) * M_PI;
165
+ double c = cos(radian_angle);
166
+ double s = sin(radian_angle);
167
+ for (stl_vertex &v : its.vertices)
168
+ rotate_point_2d(v(1), v(2), c, s);
169
+ }
170
+
171
+ void its_rotate_y(indexed_triangle_set& its, float angle)
172
+ {
173
+ double radian_angle = (angle / 180.0) * M_PI;
174
+ double c = cos(radian_angle);
175
+ double s = sin(radian_angle);
176
+ for (stl_vertex& v : its.vertices)
177
+ rotate_point_2d(v(2), v(0), c, s);
178
+ }
179
+
180
+ void its_rotate_z(indexed_triangle_set& its, float angle)
181
+ {
182
+ double radian_angle = (angle / 180.0) * M_PI;
183
+ double c = cos(radian_angle);
184
+ double s = sin(radian_angle);
185
+ for (stl_vertex& v : its.vertices)
186
+ rotate_point_2d(v(0), v(1), c, s);
187
+ }
188
+
189
+ void stl_get_size(stl_file *stl)
190
+ {
191
+ if (stl->stats.number_of_facets == 0)
192
+ return;
193
+ stl->stats.min = stl->facet_start[0].vertex[0];
194
+ stl->stats.max = stl->stats.min;
195
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
196
+ const stl_facet &face = stl->facet_start[i];
197
+ for (int j = 0; j < 3; ++ j) {
198
+ stl->stats.min = stl->stats.min.cwiseMin(face.vertex[j]);
199
+ stl->stats.max = stl->stats.max.cwiseMax(face.vertex[j]);
200
+ }
201
+ }
202
+ stl->stats.size = stl->stats.max - stl->stats.min;
203
+ stl->stats.bounding_diameter = stl->stats.size.norm();
204
+ }
205
+
206
+ void stl_mirror_xy(stl_file *stl)
207
+ {
208
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
209
+ for (int j = 0; j < 3; ++ j)
210
+ stl->facet_start[i].vertex[j](2) *= -1.0;
211
+ float temp_size = stl->stats.min(2);
212
+ stl->stats.min(2) = stl->stats.max(2);
213
+ stl->stats.max(2) = temp_size;
214
+ stl->stats.min(2) *= -1.0;
215
+ stl->stats.max(2) *= -1.0;
216
+ stl_reverse_all_facets(stl);
217
+ stl->stats.facets_reversed -= stl->stats.number_of_facets; /* for not altering stats */
218
+ }
219
+
220
+ void stl_mirror_yz(stl_file *stl)
221
+ {
222
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
223
+ for (int j = 0; j < 3; j++)
224
+ stl->facet_start[i].vertex[j](0) *= -1.0;
225
+ float temp_size = stl->stats.min(0);
226
+ stl->stats.min(0) = stl->stats.max(0);
227
+ stl->stats.max(0) = temp_size;
228
+ stl->stats.min(0) *= -1.0;
229
+ stl->stats.max(0) *= -1.0;
230
+ stl_reverse_all_facets(stl);
231
+ stl->stats.facets_reversed -= stl->stats.number_of_facets; /* for not altering stats */
232
+ }
233
+
234
+ void stl_mirror_xz(stl_file *stl)
235
+ {
236
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i)
237
+ for (int j = 0; j < 3; ++ j)
238
+ stl->facet_start[i].vertex[j](1) *= -1.0;
239
+ float temp_size = stl->stats.min(1);
240
+ stl->stats.min(1) = stl->stats.max(1);
241
+ stl->stats.max(1) = temp_size;
242
+ stl->stats.min(1) *= -1.0;
243
+ stl->stats.max(1) *= -1.0;
244
+ stl_reverse_all_facets(stl);
245
+ stl->stats.facets_reversed -= stl->stats.number_of_facets; // for not altering stats
246
+ }
247
+
248
+ static float get_area(stl_facet *facet)
249
+ {
250
+ /* cast to double before calculating cross product because large coordinates
251
+ can result in overflowing product
252
+ (bad area is responsible for bad volume and bad facets reversal) */
253
+ double cross[3][3];
254
+ for (int i = 0; i < 3; i++) {
255
+ cross[i][0]=(((double)facet->vertex[i](1) * (double)facet->vertex[(i + 1) % 3](2)) -
256
+ ((double)facet->vertex[i](2) * (double)facet->vertex[(i + 1) % 3](1)));
257
+ cross[i][1]=(((double)facet->vertex[i](2) * (double)facet->vertex[(i + 1) % 3](0)) -
258
+ ((double)facet->vertex[i](0) * (double)facet->vertex[(i + 1) % 3](2)));
259
+ cross[i][2]=(((double)facet->vertex[i](0) * (double)facet->vertex[(i + 1) % 3](1)) -
260
+ ((double)facet->vertex[i](1) * (double)facet->vertex[(i + 1) % 3](0)));
261
+ }
262
+
263
+ stl_normal sum;
264
+ sum(0) = cross[0][0] + cross[1][0] + cross[2][0];
265
+ sum(1) = cross[0][1] + cross[1][1] + cross[2][1];
266
+ sum(2) = cross[0][2] + cross[1][2] + cross[2][2];
267
+
268
+ // This should already be done. But just in case, let's do it again.
269
+ //FIXME this is questionable. the "sum" normal should be accurate, while the normal "n" may be calculated with a low accuracy.
270
+ stl_normal n;
271
+ stl_calculate_normal(n, facet);
272
+ stl_normalize_vector(n);
273
+ return 0.5f * n.dot(sum);
274
+ }
275
+
276
+ static float get_volume(stl_file *stl)
277
+ {
278
+ // Choose a point, any point as the reference.
279
+ stl_vertex p0 = stl->facet_start[0].vertex[0];
280
+ float volume = 0.f;
281
+ for (uint32_t i = 0; i < stl->stats.number_of_facets; ++ i) {
282
+ // Do dot product to get distance from point to plane.
283
+ float height = stl->facet_start[i].normal.dot(stl->facet_start[i].vertex[0] - p0);
284
+ float area = get_area(&stl->facet_start[i]);
285
+ volume += (area * height) / 3.0f;
286
+ }
287
+ return volume;
288
+ }
289
+
290
+ void stl_calculate_volume(stl_file *stl)
291
+ {
292
+ stl->stats.volume = get_volume(stl);
293
+ if (stl->stats.volume < 0.0) {
294
+ stl_reverse_all_facets(stl);
295
+ stl->stats.volume = -stl->stats.volume;
296
+ }
297
+ }
298
+
299
+ void stl_repair(
300
+ stl_file *stl,
301
+ bool fixall_flag,
302
+ bool exact_flag,
303
+ bool tolerance_flag,
304
+ float tolerance,
305
+ bool increment_flag,
306
+ float increment,
307
+ bool nearby_flag,
308
+ int iterations,
309
+ bool remove_unconnected_flag,
310
+ bool fill_holes_flag,
311
+ bool normal_directions_flag,
312
+ bool normal_values_flag,
313
+ bool reverse_all_flag,
314
+ bool verbose_flag)
315
+ {
316
+ if (exact_flag || fixall_flag || nearby_flag || remove_unconnected_flag || fill_holes_flag || normal_directions_flag) {
317
+ if (verbose_flag)
318
+ printf("Checking exact...\n");
319
+ exact_flag = true;
320
+ stl_check_facets_exact(stl);
321
+ stl->stats.facets_w_1_bad_edge = (stl->stats.connected_facets_2_edge - stl->stats.connected_facets_3_edge);
322
+ stl->stats.facets_w_2_bad_edge = (stl->stats.connected_facets_1_edge - stl->stats.connected_facets_2_edge);
323
+ stl->stats.facets_w_3_bad_edge = (stl->stats.number_of_facets - stl->stats.connected_facets_1_edge);
324
+ }
325
+
326
+ if (nearby_flag || fixall_flag) {
327
+ if (! tolerance_flag)
328
+ tolerance = stl->stats.shortest_edge;
329
+ if (! increment_flag)
330
+ increment = stl->stats.bounding_diameter / 10000.0;
331
+ }
332
+
333
+ if (stl->stats.connected_facets_3_edge < int(stl->stats.number_of_facets)) {
334
+ int last_edges_fixed = 0;
335
+ for (int i = 0; i < iterations; ++ i) {
336
+ if (stl->stats.connected_facets_3_edge < int(stl->stats.number_of_facets)) {
337
+ if (verbose_flag)
338
+ printf("Checking nearby. Tolerance= %f Iteration=%d of %d...", tolerance, i + 1, iterations);
339
+ stl_check_facets_nearby(stl, tolerance);
340
+ if (verbose_flag)
341
+ printf(" Fixed %d edges.\n", stl->stats.edges_fixed - last_edges_fixed);
342
+ last_edges_fixed = stl->stats.edges_fixed;
343
+ tolerance += increment;
344
+ } else {
345
+ if (verbose_flag)
346
+ printf("All facets connected. No further nearby check necessary.\n");
347
+ break;
348
+ }
349
+ }
350
+ } else if (verbose_flag)
351
+ printf("All facets connected. No nearby check necessary.\n");
352
+
353
+ if (remove_unconnected_flag || fixall_flag || fill_holes_flag) {
354
+ if (stl->stats.connected_facets_3_edge < int(stl->stats.number_of_facets)) {
355
+ if (verbose_flag)
356
+ printf("Removing unconnected facets...\n");
357
+ stl_remove_unconnected_facets(stl);
358
+ } else if (verbose_flag)
359
+ printf("No unconnected need to be removed.\n");
360
+ }
361
+
362
+ if (fill_holes_flag || fixall_flag) {
363
+ if (stl->stats.connected_facets_3_edge < int(stl->stats.number_of_facets)) {
364
+ if (verbose_flag)
365
+ printf("Filling holes...\n");
366
+ stl_fill_holes(stl);
367
+ } else if (verbose_flag)
368
+ printf("No holes need to be filled.\n");
369
+ }
370
+
371
+ if (reverse_all_flag) {
372
+ if (verbose_flag)
373
+ printf("Reversing all facets...\n");
374
+ stl_reverse_all_facets(stl);
375
+ }
376
+
377
+ if (normal_directions_flag || fixall_flag) {
378
+ if (verbose_flag)
379
+ printf("Checking normal directions...\n");
380
+ stl_fix_normal_directions(stl);
381
+ }
382
+
383
+ if (normal_values_flag || fixall_flag) {
384
+ if (verbose_flag)
385
+ printf("Checking normal values...\n");
386
+ stl_fix_normal_values(stl);
387
+ }
388
+
389
+ // Always calculate the volume. It shouldn't take too long.
390
+ if (verbose_flag)
391
+ printf("Calculating volume...\n");
392
+ stl_calculate_volume(stl);
393
+
394
+ if (exact_flag) {
395
+ if (verbose_flag)
396
+ printf("Verifying neighbors...\n");
397
+ stl_verify_neighbors(stl);
398
+ }
399
+ }
data/bundled_deps/agg/CMakeLists.txt ADDED
@@ -0,0 +1,5 @@
1
+ cmake_minimum_required(VERSION 3.13)
2
+ project(agg)
3
+
4
+ add_library(agg INTERFACE)
5
+ target_include_directories(agg INTERFACE .)
data/bundled_deps/agg/agg/AUTHORS ADDED
@@ -0,0 +1,2 @@
1
+ Anti-Grain Geometry - Version 2.4
2
+ Copyright (C) 2002-2005 Maxim Shemanarev (McSeem)
data/bundled_deps/agg/agg/VERSION ADDED
@@ -0,0 +1,2 @@
1
+ 2.4
2
+ svn revision 128
data/bundled_deps/agg/agg/agg_array.h ADDED
@@ -0,0 +1,1119 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ #ifndef AGG_ARRAY_INCLUDED
16
+ #define AGG_ARRAY_INCLUDED
17
+
18
+ #include <stddef.h>
19
+ #include <string.h>
20
+ #include "agg_basics.h"
21
+
22
+ namespace agg
23
+ {
24
+
25
+ //-------------------------------------------------------pod_array_adaptor
26
+ template<class T> class pod_array_adaptor
27
+ {
28
+ public:
29
+ typedef T value_type;
30
+ pod_array_adaptor(T* array, unsigned size) :
31
+ m_array(array), m_size(size) {}
32
+
33
+ unsigned size() const { return m_size; }
34
+ const T& operator [] (unsigned i) const { return m_array[i]; }
35
+ T& operator [] (unsigned i) { return m_array[i]; }
36
+ const T& at(unsigned i) const { return m_array[i]; }
37
+ T& at(unsigned i) { return m_array[i]; }
38
+ T value_at(unsigned i) const { return m_array[i]; }
39
+
40
+ private:
41
+ T* m_array;
42
+ unsigned m_size;
43
+ };
44
+
45
+
46
+ //---------------------------------------------------------pod_auto_array
47
+ template<class T, unsigned Size> class pod_auto_array
48
+ {
49
+ public:
50
+ typedef T value_type;
51
+ typedef pod_auto_array<T, Size> self_type;
52
+
53
+ pod_auto_array() {}
54
+ explicit pod_auto_array(const T* c)
55
+ {
56
+ memcpy(m_array, c, sizeof(T) * Size);
57
+ }
58
+
59
+ const self_type& operator = (const T* c)
60
+ {
61
+ memcpy(m_array, c, sizeof(T) * Size);
62
+ return *this;
63
+ }
64
+
65
+ static unsigned size() { return Size; }
66
+ const T& operator [] (unsigned i) const { return m_array[i]; }
67
+ T& operator [] (unsigned i) { return m_array[i]; }
68
+ const T& at(unsigned i) const { return m_array[i]; }
69
+ T& at(unsigned i) { return m_array[i]; }
70
+ T value_at(unsigned i) const { return m_array[i]; }
71
+
72
+ private:
73
+ T m_array[Size];
74
+ };
75
+
76
+
77
+ //--------------------------------------------------------pod_auto_vector
78
+ template<class T, unsigned Size> class pod_auto_vector
79
+ {
80
+ public:
81
+ typedef T value_type;
82
+ typedef pod_auto_vector<T, Size> self_type;
83
+
84
+ pod_auto_vector() : m_size(0) {}
85
+
86
+ void remove_all() { m_size = 0; }
87
+ void clear() { m_size = 0; }
88
+ void add(const T& v) { m_array[m_size++] = v; }
89
+ void push_back(const T& v) { m_array[m_size++] = v; }
90
+ void inc_size(unsigned size) { m_size += size; }
91
+
92
+ unsigned size() const { return m_size; }
93
+ const T& operator [] (unsigned i) const { return m_array[i]; }
94
+ T& operator [] (unsigned i) { return m_array[i]; }
95
+ const T& at(unsigned i) const { return m_array[i]; }
96
+ T& at(unsigned i) { return m_array[i]; }
97
+ T value_at(unsigned i) const { return m_array[i]; }
98
+
99
+ private:
100
+ T m_array[Size];
101
+ unsigned m_size;
102
+ };
103
+
104
+
105
+ //---------------------------------------------------------------pod_array
106
+ template<class T> class pod_array
107
+ {
108
+ public:
109
+ typedef T value_type;
110
+ typedef pod_array<T> self_type;
111
+
112
+ ~pod_array() { pod_allocator<T>::deallocate(m_array, m_size); }
113
+ pod_array() : m_array(0), m_size(0) {}
114
+
115
+ pod_array(unsigned size) :
116
+ m_array(pod_allocator<T>::allocate(size)),
117
+ m_size(size)
118
+ {}
119
+
120
+ pod_array(const self_type& v) :
121
+ m_array(pod_allocator<T>::allocate(v.m_size)),
122
+ m_size(v.m_size)
123
+ {
124
+ memcpy(m_array, v.m_array, sizeof(T) * m_size);
125
+ }
126
+
127
+ void resize(unsigned size)
128
+ {
129
+ if(size != m_size)
130
+ {
131
+ pod_allocator<T>::deallocate(m_array, m_size);
132
+ m_array = pod_allocator<T>::allocate(m_size = size);
133
+ }
134
+ }
135
+ const self_type& operator = (const self_type& v)
136
+ {
137
+ resize(v.size());
138
+ memcpy(m_array, v.m_array, sizeof(T) * m_size);
139
+ return *this;
140
+ }
141
+
142
+ unsigned size() const { return m_size; }
143
+ const T& operator [] (unsigned i) const { return m_array[i]; }
144
+ T& operator [] (unsigned i) { return m_array[i]; }
145
+ const T& at(unsigned i) const { return m_array[i]; }
146
+ T& at(unsigned i) { return m_array[i]; }
147
+ T value_at(unsigned i) const { return m_array[i]; }
148
+
149
+ const T* data() const { return m_array; }
150
+ T* data() { return m_array; }
151
+ private:
152
+ T* m_array;
153
+ unsigned m_size;
154
+ };
155
+
156
+
157
+
158
+ //--------------------------------------------------------------pod_vector
159
+ // A simple class template to store Plain Old Data, a vector
160
+ // of a fixed size. The data is continuous in memory
161
+ //------------------------------------------------------------------------
162
+ template<class T> class pod_vector
163
+ {
164
+ public:
165
+ typedef T value_type;
166
+
167
+ ~pod_vector() { pod_allocator<T>::deallocate(m_array, m_capacity); }
168
+ pod_vector() : m_size(0), m_capacity(0), m_array(0) {}
169
+ pod_vector(unsigned cap, unsigned extra_tail=0);
170
+
171
+ // Copying
172
+ pod_vector(const pod_vector<T>&);
173
+ const pod_vector<T>& operator = (const pod_vector<T>&);
174
+
175
+ // Set new capacity. All data is lost, size is set to zero.
176
+ void capacity(unsigned cap, unsigned extra_tail=0);
177
+ unsigned capacity() const { return m_capacity; }
178
+
179
+ // Allocate n elements. All data is lost,
180
+ // but elements can be accessed in range 0...size-1.
181
+ void allocate(unsigned size, unsigned extra_tail=0);
182
+
183
+ // Resize keeping the content.
184
+ void resize(unsigned new_size);
185
+
186
+ void zero()
187
+ {
188
+ memset(m_array, 0, sizeof(T) * m_size);
189
+ }
190
+
191
+ void add(const T& v) { m_array[m_size++] = v; }
192
+ void push_back(const T& v) { m_array[m_size++] = v; }
193
+ void insert_at(unsigned pos, const T& val);
194
+ void inc_size(unsigned size) { m_size += size; }
195
+ unsigned size() const { return m_size; }
196
+ unsigned byte_size() const { return m_size * sizeof(T); }
197
+ void serialize(int8u* ptr) const;
198
+ void deserialize(const int8u* data, unsigned byte_size);
199
+ const T& operator [] (unsigned i) const { return m_array[i]; }
200
+ T& operator [] (unsigned i) { return m_array[i]; }
201
+ const T& at(unsigned i) const { return m_array[i]; }
202
+ T& at(unsigned i) { return m_array[i]; }
203
+ T value_at(unsigned i) const { return m_array[i]; }
204
+
205
+ const T* data() const { return m_array; }
206
+ T* data() { return m_array; }
207
+
208
+ void remove_all() { m_size = 0; }
209
+ void clear() { m_size = 0; }
210
+ void cut_at(unsigned num) { if(num < m_size) m_size = num; }
211
+
212
+ private:
213
+ unsigned m_size;
214
+ unsigned m_capacity;
215
+ T* m_array;
216
+ };
217
+
218
+ //------------------------------------------------------------------------
219
+ template<class T>
220
+ void pod_vector<T>::capacity(unsigned cap, unsigned extra_tail)
221
+ {
222
+ m_size = 0;
223
+ if(cap > m_capacity)
224
+ {
225
+ pod_allocator<T>::deallocate(m_array, m_capacity);
226
+ m_capacity = cap + extra_tail;
227
+ m_array = m_capacity ? pod_allocator<T>::allocate(m_capacity) : 0;
228
+ }
229
+ }
230
+
231
+ //------------------------------------------------------------------------
232
+ template<class T>
233
+ void pod_vector<T>::allocate(unsigned size, unsigned extra_tail)
234
+ {
235
+ capacity(size, extra_tail);
236
+ m_size = size;
237
+ }
238
+
239
+
240
+ //------------------------------------------------------------------------
241
+ template<class T>
242
+ void pod_vector<T>::resize(unsigned new_size)
243
+ {
244
+ if(new_size > m_size)
245
+ {
246
+ if(new_size > m_capacity)
247
+ {
248
+ T* data = pod_allocator<T>::allocate(new_size);
249
+ memcpy(data, m_array, m_size * sizeof(T));
250
+ pod_allocator<T>::deallocate(m_array, m_capacity);
251
+ m_array = data;
252
+ }
253
+ }
254
+ else
255
+ {
256
+ m_size = new_size;
257
+ }
258
+ }
259
+
260
+ //------------------------------------------------------------------------
261
+ template<class T> pod_vector<T>::pod_vector(unsigned cap, unsigned extra_tail) :
262
+ m_size(0),
263
+ m_capacity(cap + extra_tail),
264
+ m_array(pod_allocator<T>::allocate(m_capacity)) {}
265
+
266
+ //------------------------------------------------------------------------
267
+ template<class T> pod_vector<T>::pod_vector(const pod_vector<T>& v) :
268
+ m_size(v.m_size),
269
+ m_capacity(v.m_capacity),
270
+ m_array(v.m_capacity ? pod_allocator<T>::allocate(v.m_capacity) : 0)
271
+ {
272
+ memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
273
+ }
274
+
275
+ //------------------------------------------------------------------------
276
+ template<class T> const pod_vector<T>&
277
+ pod_vector<T>::operator = (const pod_vector<T>&v)
278
+ {
279
+ allocate(v.m_size);
280
+ if(v.m_size) memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
281
+ return *this;
282
+ }
283
+
284
+ //------------------------------------------------------------------------
285
+ template<class T> void pod_vector<T>::serialize(int8u* ptr) const
286
+ {
287
+ if(m_size) memcpy(ptr, m_array, m_size * sizeof(T));
288
+ }
289
+
290
+ //------------------------------------------------------------------------
291
+ template<class T>
292
+ void pod_vector<T>::deserialize(const int8u* data, unsigned byte_size)
293
+ {
294
+ byte_size /= sizeof(T);
295
+ allocate(byte_size);
296
+ if(byte_size) memcpy(m_array, data, byte_size * sizeof(T));
297
+ }
298
+
299
+ //------------------------------------------------------------------------
300
+ template<class T>
301
+ void pod_vector<T>::insert_at(unsigned pos, const T& val)
302
+ {
303
+ if(pos >= m_size)
304
+ {
305
+ m_array[m_size] = val;
306
+ }
307
+ else
308
+ {
309
+ memmove(m_array + pos + 1, m_array + pos, (m_size - pos) * sizeof(T));
310
+ m_array[pos] = val;
311
+ }
312
+ ++m_size;
313
+ }
314
+
315
+ //---------------------------------------------------------------pod_bvector
316
+ // A simple class template to store Plain Old Data, similar to std::deque
317
+ // It doesn't reallocate memory but instead, uses blocks of data of size
318
+ // of (1 << S), that is, power of two. The data is NOT contiguous in memory,
319
+ // so the only valid access method is operator [] or curr(), prev(), next()
320
+ //
321
+ // Reallocs occur only when the pool of pointers to blocks needs
322
+ // to be extended (it happens very rarely). You can control the value
323
+ // of increment to reallocate the pointer buffer. See the second constructor.
324
+ // By default, the increment value equals (1 << S), i.e., the block size.
325
+ //------------------------------------------------------------------------
326
+ template<class T, unsigned S=6> class pod_bvector
327
+ {
328
+ public:
329
+ enum block_scale_e
330
+ {
331
+ block_shift = S,
332
+ block_size = 1 << block_shift,
333
+ block_mask = block_size - 1
334
+ };
335
+
336
+ typedef T value_type;
337
+
338
+ ~pod_bvector();
339
+ pod_bvector();
340
+ pod_bvector(unsigned block_ptr_inc);
341
+
342
+ // Copying
343
+ pod_bvector(const pod_bvector<T, S>& v);
344
+ const pod_bvector<T, S>& operator = (const pod_bvector<T, S>& v);
345
+
346
+ void remove_all() { m_size = 0; }
347
+ void clear() { m_size = 0; }
348
+ void free_all() { free_tail(0); }
349
+ void free_tail(unsigned size);
350
+ void add(const T& val);
351
+ void push_back(const T& val) { add(val); }
352
+ void modify_last(const T& val);
353
+ void remove_last();
354
+
355
+ int allocate_continuous_block(unsigned num_elements);
356
+
357
+ void add_array(const T* ptr, unsigned num_elem)
358
+ {
359
+ while(num_elem--)
360
+ {
361
+ add(*ptr++);
362
+ }
363
+ }
364
+
365
+ template<class DataAccessor> void add_data(DataAccessor& data)
366
+ {
367
+ while(data.size())
368
+ {
369
+ add(*data);
370
+ ++data;
371
+ }
372
+ }
373
+
374
+ void cut_at(unsigned size)
375
+ {
376
+ if(size < m_size) m_size = size;
377
+ }
378
+
379
+ unsigned size() const { return m_size; }
380
+
381
+ const T& operator [] (unsigned i) const
382
+ {
383
+ return m_blocks[i >> block_shift][i & block_mask];
384
+ }
385
+
386
+ T& operator [] (unsigned i)
387
+ {
388
+ return m_blocks[i >> block_shift][i & block_mask];
389
+ }
390
+
391
+ const T& at(unsigned i) const
392
+ {
393
+ return m_blocks[i >> block_shift][i & block_mask];
394
+ }
395
+
396
+ T& at(unsigned i)
397
+ {
398
+ return m_blocks[i >> block_shift][i & block_mask];
399
+ }
400
+
401
+ T value_at(unsigned i) const
402
+ {
403
+ return m_blocks[i >> block_shift][i & block_mask];
404
+ }
405
+
406
+ const T& curr(unsigned idx) const
407
+ {
408
+ return (*this)[idx];
409
+ }
410
+
411
+ T& curr(unsigned idx)
412
+ {
413
+ return (*this)[idx];
414
+ }
415
+
416
+ const T& prev(unsigned idx) const
417
+ {
418
+ return (*this)[(idx + m_size - 1) % m_size];
419
+ }
420
+
421
+ T& prev(unsigned idx)
422
+ {
423
+ return (*this)[(idx + m_size - 1) % m_size];
424
+ }
425
+
426
+ const T& next(unsigned idx) const
427
+ {
428
+ return (*this)[(idx + 1) % m_size];
429
+ }
430
+
431
+ T& next(unsigned idx)
432
+ {
433
+ return (*this)[(idx + 1) % m_size];
434
+ }
435
+
436
+ const T& last() const
437
+ {
438
+ return (*this)[m_size - 1];
439
+ }
440
+
441
+ T& last()
442
+ {
443
+ return (*this)[m_size - 1];
444
+ }
445
+
446
+ unsigned byte_size() const;
447
+ void serialize(int8u* ptr) const;
448
+ void deserialize(const int8u* data, unsigned byte_size);
449
+ void deserialize(unsigned start, const T& empty_val,
450
+ const int8u* data, unsigned byte_size);
451
+
452
+ template<class ByteAccessor>
453
+ void deserialize(ByteAccessor data)
454
+ {
455
+ remove_all();
456
+ unsigned elem_size = data.size() / sizeof(T);
457
+
458
+ for(unsigned i = 0; i < elem_size; ++i)
459
+ {
460
+ int8u* ptr = (int8u*)data_ptr();
461
+ for(unsigned j = 0; j < sizeof(T); ++j)
462
+ {
463
+ *ptr++ = *data;
464
+ ++data;
465
+ }
466
+ ++m_size;
467
+ }
468
+ }
469
+
470
+ template<class ByteAccessor>
471
+ void deserialize(unsigned start, const T& empty_val, ByteAccessor data)
472
+ {
473
+ while(m_size < start)
474
+ {
475
+ add(empty_val);
476
+ }
477
+
478
+ unsigned elem_size = data.size() / sizeof(T);
479
+ for(unsigned i = 0; i < elem_size; ++i)
480
+ {
481
+ int8u* ptr;
482
+ if(start + i < m_size)
483
+ {
484
+ ptr = (int8u*)(&((*this)[start + i]));
485
+ }
486
+ else
487
+ {
488
+ ptr = (int8u*)data_ptr();
489
+ ++m_size;
490
+ }
491
+ for(unsigned j = 0; j < sizeof(T); ++j)
492
+ {
493
+ *ptr++ = *data;
494
+ ++data;
495
+ }
496
+ }
497
+ }
498
+
499
+ const T* block(unsigned nb) const { return m_blocks[nb]; }
500
+
501
+ private:
502
+ void allocate_block(unsigned nb);
503
+ T* data_ptr();
504
+
505
+ unsigned m_size;
506
+ unsigned m_num_blocks;
507
+ unsigned m_max_blocks;
508
+ T** m_blocks;
509
+ unsigned m_block_ptr_inc;
510
+ };
511
+
512
+
513
+ //------------------------------------------------------------------------
514
+ template<class T, unsigned S> pod_bvector<T, S>::~pod_bvector()
515
+ {
516
+ if(m_num_blocks)
517
+ {
518
+ T** blk = m_blocks + m_num_blocks - 1;
519
+ while(m_num_blocks--)
520
+ {
521
+ pod_allocator<T>::deallocate(*blk, block_size);
522
+ --blk;
523
+ }
524
+ }
525
+ pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
526
+ }
527
+
528
+
529
+ //------------------------------------------------------------------------
530
+ template<class T, unsigned S>
531
+ void pod_bvector<T, S>::free_tail(unsigned size)
532
+ {
533
+ if(size < m_size)
534
+ {
535
+ unsigned nb = (size + block_mask) >> block_shift;
536
+ while(m_num_blocks > nb)
537
+ {
538
+ pod_allocator<T>::deallocate(m_blocks[--m_num_blocks], block_size);
539
+ }
540
+ if(m_num_blocks == 0)
541
+ {
542
+ pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
543
+ m_blocks = 0;
544
+ m_max_blocks = 0;
545
+ }
546
+ m_size = size;
547
+ }
548
+ }
549
+
550
+
551
+ //------------------------------------------------------------------------
552
+ template<class T, unsigned S> pod_bvector<T, S>::pod_bvector() :
553
+ m_size(0),
554
+ m_num_blocks(0),
555
+ m_max_blocks(0),
556
+ m_blocks(0),
557
+ m_block_ptr_inc(block_size)
558
+ {
559
+ }
560
+
561
+
562
+ //------------------------------------------------------------------------
563
+ template<class T, unsigned S>
564
+ pod_bvector<T, S>::pod_bvector(unsigned block_ptr_inc) :
565
+ m_size(0),
566
+ m_num_blocks(0),
567
+ m_max_blocks(0),
568
+ m_blocks(0),
569
+ m_block_ptr_inc(block_ptr_inc)
570
+ {
571
+ }
572
+
573
+
574
+ //------------------------------------------------------------------------
575
+ template<class T, unsigned S>
576
+ pod_bvector<T, S>::pod_bvector(const pod_bvector<T, S>& v) :
577
+ m_size(v.m_size),
578
+ m_num_blocks(v.m_num_blocks),
579
+ m_max_blocks(v.m_max_blocks),
580
+ m_blocks(v.m_max_blocks ?
581
+ pod_allocator<T*>::allocate(v.m_max_blocks) :
582
+ 0),
583
+ m_block_ptr_inc(v.m_block_ptr_inc)
584
+ {
585
+ unsigned i;
586
+ for(i = 0; i < v.m_num_blocks; ++i)
587
+ {
588
+ m_blocks[i] = pod_allocator<T>::allocate(block_size);
589
+ memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
590
+ }
591
+ }
592
+
593
+
594
+ //------------------------------------------------------------------------
595
+ template<class T, unsigned S>
596
+ const pod_bvector<T, S>&
597
+ pod_bvector<T, S>::operator = (const pod_bvector<T, S>& v)
598
+ {
599
+ unsigned i;
600
+ for(i = m_num_blocks; i < v.m_num_blocks; ++i)
601
+ {
602
+ allocate_block(i);
603
+ }
604
+ for(i = 0; i < v.m_num_blocks; ++i)
605
+ {
606
+ memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
607
+ }
608
+ m_size = v.m_size;
609
+ return *this;
610
+ }
611
+
612
+
613
+ //------------------------------------------------------------------------
614
+ template<class T, unsigned S>
615
+ void pod_bvector<T, S>::allocate_block(unsigned nb)
616
+ {
617
+ if(nb >= m_max_blocks)
618
+ {
619
+ T** new_blocks = pod_allocator<T*>::allocate(m_max_blocks + m_block_ptr_inc);
620
+
621
+ if(m_blocks)
622
+ {
623
+ memcpy(new_blocks,
624
+ m_blocks,
625
+ m_num_blocks * sizeof(T*));
626
+
627
+ pod_allocator<T*>::deallocate(m_blocks, m_max_blocks);
628
+ }
629
+ m_blocks = new_blocks;
630
+ m_max_blocks += m_block_ptr_inc;
631
+ }
632
+ m_blocks[nb] = pod_allocator<T>::allocate(block_size);
633
+ m_num_blocks++;
634
+ }
635
+
636
+
637
+
638
+ //------------------------------------------------------------------------
639
+ template<class T, unsigned S>
640
+ inline T* pod_bvector<T, S>::data_ptr()
641
+ {
642
+ unsigned nb = m_size >> block_shift;
643
+ if(nb >= m_num_blocks)
644
+ {
645
+ allocate_block(nb);
646
+ }
647
+ return m_blocks[nb] + (m_size & block_mask);
648
+ }
649
+
650
+
651
+
652
+ //------------------------------------------------------------------------
653
+ template<class T, unsigned S>
654
+ inline void pod_bvector<T, S>::add(const T& val)
655
+ {
656
+ *data_ptr() = val;
657
+ ++m_size;
658
+ }
659
+
660
+
661
+ //------------------------------------------------------------------------
662
+ template<class T, unsigned S>
663
+ inline void pod_bvector<T, S>::remove_last()
664
+ {
665
+ if(m_size) --m_size;
666
+ }
667
+
668
+
669
+ //------------------------------------------------------------------------
670
+ template<class T, unsigned S>
671
+ void pod_bvector<T, S>::modify_last(const T& val)
672
+ {
673
+ remove_last();
674
+ add(val);
675
+ }
676
+
677
+
678
+ //------------------------------------------------------------------------
679
+ template<class T, unsigned S>
680
+ int pod_bvector<T, S>::allocate_continuous_block(unsigned num_elements)
681
+ {
682
+ if(num_elements < block_size)
683
+ {
684
+ data_ptr(); // Allocate initial block if necessary
685
+ unsigned rest = block_size - (m_size & block_mask);
686
+ unsigned index;
687
+ if(num_elements <= rest)
688
+ {
689
+ // The rest of the block is good, we can use it
690
+ //-----------------
691
+ index = m_size;
692
+ m_size += num_elements;
693
+ return index;
694
+ }
695
+
696
+ // New block
697
+ //---------------
698
+ m_size += rest;
699
+ data_ptr();
700
+ index = m_size;
701
+ m_size += num_elements;
702
+ return index;
703
+ }
704
+ return -1; // Impossible to allocate
705
+ }
706
+
707
+
708
+ //------------------------------------------------------------------------
709
+ template<class T, unsigned S>
710
+ unsigned pod_bvector<T, S>::byte_size() const
711
+ {
712
+ return m_size * sizeof(T);
713
+ }
714
+
715
+
716
+ //------------------------------------------------------------------------
717
+ template<class T, unsigned S>
718
+ void pod_bvector<T, S>::serialize(int8u* ptr) const
719
+ {
720
+ unsigned i;
721
+ for(i = 0; i < m_size; i++)
722
+ {
723
+ memcpy(ptr, &(*this)[i], sizeof(T));
724
+ ptr += sizeof(T);
725
+ }
726
+ }
727
+
728
+ //------------------------------------------------------------------------
729
+ template<class T, unsigned S>
730
+ void pod_bvector<T, S>::deserialize(const int8u* data, unsigned byte_size)
731
+ {
732
+ remove_all();
733
+ byte_size /= sizeof(T);
734
+ for(unsigned i = 0; i < byte_size; ++i)
735
+ {
736
+ T* ptr = data_ptr();
737
+ memcpy(ptr, data, sizeof(T));
738
+ ++m_size;
739
+ data += sizeof(T);
740
+ }
741
+ }
742
+
743
+
744
+ // Replace or add a number of elements starting from "start" position
745
+ //------------------------------------------------------------------------
746
+ template<class T, unsigned S>
747
+ void pod_bvector<T, S>::deserialize(unsigned start, const T& empty_val,
748
+ const int8u* data, unsigned byte_size)
749
+ {
750
+ while(m_size < start)
751
+ {
752
+ add(empty_val);
753
+ }
754
+
755
+ byte_size /= sizeof(T);
756
+ for(unsigned i = 0; i < byte_size; ++i)
757
+ {
758
+ if(start + i < m_size)
759
+ {
760
+ memcpy(&((*this)[start + i]), data, sizeof(T));
761
+ }
762
+ else
763
+ {
764
+ T* ptr = data_ptr();
765
+ memcpy(ptr, data, sizeof(T));
766
+ ++m_size;
767
+ }
768
+ data += sizeof(T);
769
+ }
770
+ }
771
+
772
+
773
+ //---------------------------------------------------------block_allocator
774
+ // Allocator for arbitrary POD data. Most usable in different cache
775
+ // systems for efficient memory allocations.
776
+ // Memory is allocated with blocks of fixed size ("block_size" in
777
+ // the constructor). If required size exceeds the block size the allocator
778
+ // creates a new block of the required size. However, the most efficient
779
+ // use is when the average required size is much less than the block size.
780
+ //------------------------------------------------------------------------
781
+ class block_allocator
782
+ {
783
+ struct block_type
784
+ {
785
+ int8u* data;
786
+ unsigned size;
787
+ };
788
+
789
+ public:
790
+ void remove_all()
791
+ {
792
+ if(m_num_blocks)
793
+ {
794
+ block_type* blk = m_blocks + m_num_blocks - 1;
795
+ while(m_num_blocks--)
796
+ {
797
+ pod_allocator<int8u>::deallocate(blk->data, blk->size);
798
+ --blk;
799
+ }
800
+ pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
801
+ }
802
+ m_num_blocks = 0;
803
+ m_max_blocks = 0;
804
+ m_blocks = 0;
805
+ m_buf_ptr = 0;
806
+ m_rest = 0;
807
+ }
808
+
809
+ ~block_allocator()
810
+ {
811
+ remove_all();
812
+ }
813
+
814
+ block_allocator(unsigned block_size, unsigned block_ptr_inc=256-8) :
815
+ m_block_size(block_size),
816
+ m_block_ptr_inc(block_ptr_inc),
817
+ m_num_blocks(0),
818
+ m_max_blocks(0),
819
+ m_blocks(0),
820
+ m_buf_ptr(0),
821
+ m_rest(0)
822
+ {
823
+ }
824
+
825
+
826
+ int8u* allocate(unsigned size, unsigned alignment=1)
827
+ {
828
+ if(size == 0) return 0;
829
+ if(size <= m_rest)
830
+ {
831
+ int8u* ptr = m_buf_ptr;
832
+ if(alignment > 1)
833
+ {
834
+ unsigned align =
835
+ (alignment - unsigned((size_t)ptr) % alignment) % alignment;
836
+
837
+ size += align;
838
+ ptr += align;
839
+ if(size <= m_rest)
840
+ {
841
+ m_rest -= size;
842
+ m_buf_ptr += size;
843
+ return ptr;
844
+ }
845
+ allocate_block(size);
846
+ return allocate(size - align, alignment);
847
+ }
848
+ m_rest -= size;
849
+ m_buf_ptr += size;
850
+ return ptr;
851
+ }
852
+ allocate_block(size + alignment - 1);
853
+ return allocate(size, alignment);
854
+ }
855
+
856
+
857
+ private:
858
+ void allocate_block(unsigned size)
859
+ {
860
+ if(size < m_block_size) size = m_block_size;
861
+ if(m_num_blocks >= m_max_blocks)
862
+ {
863
+ block_type* new_blocks =
864
+ pod_allocator<block_type>::allocate(m_max_blocks + m_block_ptr_inc);
865
+
866
+ if(m_blocks)
867
+ {
868
+ memcpy(new_blocks,
869
+ m_blocks,
870
+ m_num_blocks * sizeof(block_type));
871
+ pod_allocator<block_type>::deallocate(m_blocks, m_max_blocks);
872
+ }
873
+ m_blocks = new_blocks;
874
+ m_max_blocks += m_block_ptr_inc;
875
+ }
876
+
877
+ m_blocks[m_num_blocks].size = size;
878
+ m_blocks[m_num_blocks].data =
879
+ m_buf_ptr =
880
+ pod_allocator<int8u>::allocate(size);
881
+
882
+ m_num_blocks++;
883
+ m_rest = size;
884
+ }
885
+
886
+ unsigned m_block_size;
887
+ unsigned m_block_ptr_inc;
888
+ unsigned m_num_blocks;
889
+ unsigned m_max_blocks;
890
+ block_type* m_blocks;
891
+ int8u* m_buf_ptr;
892
+ unsigned m_rest;
893
+ };
894
+
895
+
896
+
897
+
898
+
899
+
900
+
901
+
902
+ //------------------------------------------------------------------------
903
+ enum quick_sort_threshold_e
904
+ {
905
+ quick_sort_threshold = 9
906
+ };
907
+
908
+
909
+ //-----------------------------------------------------------swap_elements
910
+ template<class T> inline void swap_elements(T& a, T& b)
911
+ {
912
+ T temp = a;
913
+ a = b;
914
+ b = temp;
915
+ }
916
+
917
+
918
+ //--------------------------------------------------------------quick_sort
919
+ template<class Array, class Less>
920
+ void quick_sort(Array& arr, Less less)
921
+ {
922
+ if(arr.size() < 2) return;
923
+
924
+ typename Array::value_type* e1;
925
+ typename Array::value_type* e2;
926
+
927
+ int stack[80];
928
+ int* top = stack;
929
+ int limit = arr.size();
930
+ int base = 0;
931
+
932
+ for(;;)
933
+ {
934
+ int len = limit - base;
935
+
936
+ int i;
937
+ int j;
938
+ int pivot;
939
+
940
+ if(len > quick_sort_threshold)
941
+ {
942
+ // we use base + len/2 as the pivot
943
+ pivot = base + len / 2;
944
+ swap_elements(arr[base], arr[pivot]);
945
+
946
+ i = base + 1;
947
+ j = limit - 1;
948
+
949
+ // now ensure that *i <= *base <= *j
950
+ e1 = &(arr[j]);
951
+ e2 = &(arr[i]);
952
+ if(less(*e1, *e2)) swap_elements(*e1, *e2);
953
+
954
+ e1 = &(arr[base]);
955
+ e2 = &(arr[i]);
956
+ if(less(*e1, *e2)) swap_elements(*e1, *e2);
957
+
958
+ e1 = &(arr[j]);
959
+ e2 = &(arr[base]);
960
+ if(less(*e1, *e2)) swap_elements(*e1, *e2);
961
+
962
+ for(;;)
963
+ {
964
+ do i++; while( less(arr[i], arr[base]) );
965
+ do j--; while( less(arr[base], arr[j]) );
966
+
967
+ if( i > j )
968
+ {
969
+ break;
970
+ }
971
+
972
+ swap_elements(arr[i], arr[j]);
973
+ }
974
+
975
+ swap_elements(arr[base], arr[j]);
976
+
977
+ // now, push the largest sub-array
978
+ if(j - base > limit - i)
979
+ {
980
+ top[0] = base;
981
+ top[1] = j;
982
+ base = i;
983
+ }
984
+ else
985
+ {
986
+ top[0] = i;
987
+ top[1] = limit;
988
+ limit = j;
989
+ }
990
+ top += 2;
991
+ }
992
+ else
993
+ {
994
+ // the sub-array is small, perform insertion sort
995
+ j = base;
996
+ i = j + 1;
997
+
998
+ for(; i < limit; j = i, i++)
999
+ {
1000
+ for(; less(*(e1 = &(arr[j + 1])), *(e2 = &(arr[j]))); j--)
1001
+ {
1002
+ swap_elements(*e1, *e2);
1003
+ if(j == base)
1004
+ {
1005
+ break;
1006
+ }
1007
+ }
1008
+ }
1009
+ if(top > stack)
1010
+ {
1011
+ top -= 2;
1012
+ base = top[0];
1013
+ limit = top[1];
1014
+ }
1015
+ else
1016
+ {
1017
+ break;
1018
+ }
1019
+ }
1020
+ }
1021
+ }
1022
+
1023
+
1024
+
1025
+
1026
+ //------------------------------------------------------remove_duplicates
1027
+ // Remove duplicates from a sorted array. It doesn't cut the
1028
+ // tail of the array, it just returns the number of remaining elements.
1029
+ //-----------------------------------------------------------------------
1030
+ template<class Array, class Equal>
1031
+ unsigned remove_duplicates(Array& arr, Equal equal)
1032
+ {
1033
+ if(arr.size() < 2) return arr.size();
1034
+
1035
+ unsigned i, j;
1036
+ for(i = 1, j = 1; i < arr.size(); i++)
1037
+ {
1038
+ typename Array::value_type& e = arr[i];
1039
+ if(!equal(e, arr[i - 1]))
1040
+ {
1041
+ arr[j++] = e;
1042
+ }
1043
+ }
1044
+ return j;
1045
+ }
1046
+
1047
+ //--------------------------------------------------------invert_container
1048
+ template<class Array> void invert_container(Array& arr)
1049
+ {
1050
+ int i = 0;
1051
+ int j = arr.size() - 1;
1052
+ while(i < j)
1053
+ {
1054
+ swap_elements(arr[i++], arr[j--]);
1055
+ }
1056
+ }
1057
+
1058
+ //------------------------------------------------------binary_search_pos
1059
+ template<class Array, class Value, class Less>
1060
+ unsigned binary_search_pos(const Array& arr, const Value& val, Less less)
1061
+ {
1062
+ if(arr.size() == 0) return 0;
1063
+
1064
+ unsigned beg = 0;
1065
+ unsigned end = arr.size() - 1;
1066
+
1067
+ if(less(val, arr[0])) return 0;
1068
+ if(less(arr[end], val)) return end + 1;
1069
+
1070
+ while(end - beg > 1)
1071
+ {
1072
+ unsigned mid = (end + beg) >> 1;
1073
+ if(less(val, arr[mid])) end = mid;
1074
+ else beg = mid;
1075
+ }
1076
+
1077
+ //if(beg <= 0 && less(val, arr[0])) return 0;
1078
+ //if(end >= arr.size() - 1 && less(arr[end], val)) ++end;
1079
+
1080
+ return end;
1081
+ }
1082
+
1083
+ //----------------------------------------------------------range_adaptor
1084
+ template<class Array> class range_adaptor
1085
+ {
1086
+ public:
1087
+ typedef typename Array::value_type value_type;
1088
+
1089
+ range_adaptor(Array& array, unsigned start, unsigned size) :
1090
+ m_array(array), m_start(start), m_size(size)
1091
+ {}
1092
+
1093
+ unsigned size() const { return m_size; }
1094
+ const value_type& operator [] (unsigned i) const { return m_array[m_start + i]; }
1095
+ value_type& operator [] (unsigned i) { return m_array[m_start + i]; }
1096
+ const value_type& at(unsigned i) const { return m_array[m_start + i]; }
1097
+ value_type& at(unsigned i) { return m_array[m_start + i]; }
1098
+ value_type value_at(unsigned i) const { return m_array[m_start + i]; }
1099
+
1100
+ private:
1101
+ Array& m_array;
1102
+ unsigned m_start;
1103
+ unsigned m_size;
1104
+ };
1105
+
1106
+ //---------------------------------------------------------------int_less
1107
+ inline bool int_less(int a, int b) { return a < b; }
1108
+
1109
+ //------------------------------------------------------------int_greater
1110
+ inline bool int_greater(int a, int b) { return a > b; }
1111
+
1112
+ //----------------------------------------------------------unsigned_less
1113
+ inline bool unsigned_less(unsigned a, unsigned b) { return a < b; }
1114
+
1115
+ //-------------------------------------------------------unsigned_greater
1116
+ inline bool unsigned_greater(unsigned a, unsigned b) { return a > b; }
1117
+ }
1118
+
1119
+ #endif
data/bundled_deps/agg/agg/agg_basics.h ADDED
@@ -0,0 +1,574 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+
16
+ #ifndef AGG_BASICS_INCLUDED
17
+ #define AGG_BASICS_INCLUDED
18
+
19
+ #include <cmath>
20
+ #include "agg_config.h"
21
+
22
+ //---------------------------------------------------------AGG_CUSTOM_ALLOCATOR
23
+ #ifdef AGG_CUSTOM_ALLOCATOR
24
+ #include "agg_allocator.h"
25
+ #else
26
+ namespace agg
27
+ {
28
+ // The policy of all AGG containers and memory allocation strategy
29
+ // in general is that no allocated data requires explicit construction.
30
+ // It means that the allocator can be really simple; you can even
31
+ // replace new/delete with malloc/free. The constructors and destructors
32
+ // won't be called in this case, however everything will remain working.
33
+ // The second argument of deallocate() is the size of the allocated
34
+ // block. You can use this information if you wish.
35
+ //------------------------------------------------------------pod_allocator
36
+ template<class T> struct pod_allocator
37
+ {
38
+ static T* allocate(unsigned num) { return new T [num]; }
39
+ static void deallocate(T* ptr, unsigned) { delete [] ptr; }
40
+ };
41
+
42
+ // Single object allocator. It can also be replaced with your custom
43
+ // allocator. The difference is that it can only allocate a single
44
+ // object and the constructor and destructor must be called.
45
+ // In AGG there is no need to allocate an array of objects with
46
+ // calling their constructors (only single ones). Therefore, if you
47
+ // replace new/delete with malloc/free, make sure that the in-place
48
+ // new is called and take care of calling the destructor too.
49
+ //------------------------------------------------------------obj_allocator
50
+ template<class T> struct obj_allocator
51
+ {
52
+ static T* allocate() { return new T; }
53
+ static void deallocate(T* ptr) { delete ptr; }
54
+ };
55
+ }
56
+ #endif
57
+
58
+
59
+ //-------------------------------------------------------- Default basic types
60
+ //
61
+ // If the compiler has different capacity of the basic types you can redefine
62
+ // them via the compiler command line or by generating agg_config.h that is
63
+ // empty by default.
64
+ //
65
+ #ifndef AGG_INT8
66
+ #define AGG_INT8 signed char
67
+ #endif
68
+
69
+ #ifndef AGG_INT8U
70
+ #define AGG_INT8U unsigned char
71
+ #endif
72
+
73
+ #ifndef AGG_INT16
74
+ #define AGG_INT16 short
75
+ #endif
76
+
77
+ #ifndef AGG_INT16U
78
+ #define AGG_INT16U unsigned short
79
+ #endif
80
+
81
+ #ifndef AGG_INT32
82
+ #define AGG_INT32 int
83
+ #endif
84
+
85
+ #ifndef AGG_INT32U
86
+ #define AGG_INT32U unsigned
87
+ #endif
88
+
89
+ #ifndef AGG_INT64
90
+ #if defined(_MSC_VER) || defined(__BORLANDC__)
91
+ #define AGG_INT64 signed __int64
92
+ #else
93
+ #define AGG_INT64 signed long long
94
+ #endif
95
+ #endif
96
+
97
+ #ifndef AGG_INT64U
98
+ #if defined(_MSC_VER) || defined(__BORLANDC__)
99
+ #define AGG_INT64U unsigned __int64
100
+ #else
101
+ #define AGG_INT64U unsigned long long
102
+ #endif
103
+ #endif
104
+
105
+ //------------------------------------------------ Some fixes for MS Visual C++
106
+ #if defined(_MSC_VER)
107
+ #pragma warning(disable:4786) // Identifier was truncated...
108
+ #endif
109
+
110
+ #if defined(_MSC_VER)
111
+ #define AGG_INLINE __forceinline
112
+ #else
113
+ #define AGG_INLINE inline
114
+ #endif
115
+
116
+ namespace agg
117
+ {
118
+ //-------------------------------------------------------------------------
119
+ typedef AGG_INT8 int8; //----int8
120
+ typedef AGG_INT8U int8u; //----int8u
121
+ typedef AGG_INT16 int16; //----int16
122
+ typedef AGG_INT16U int16u; //----int16u
123
+ typedef AGG_INT32 int32; //----int32
124
+ typedef AGG_INT32U int32u; //----int32u
125
+ typedef AGG_INT64 int64; //----int64
126
+ typedef AGG_INT64U int64u; //----int64u
127
+
128
+ #if defined(AGG_FISTP)
129
+ #pragma warning(push)
130
+ #pragma warning(disable : 4035) //Disable warning "no return value"
131
+ AGG_INLINE int iround(double v) //-------iround
132
+ {
133
+ int t;
134
+ __asm fld qword ptr [v]
135
+ __asm fistp dword ptr [t]
136
+ __asm mov eax, dword ptr [t]
137
+ }
138
+ AGG_INLINE unsigned uround(double v) //-------uround
139
+ {
140
+ unsigned t;
141
+ __asm fld qword ptr [v]
142
+ __asm fistp dword ptr [t]
143
+ __asm mov eax, dword ptr [t]
144
+ }
145
+ #pragma warning(pop)
146
+ AGG_INLINE int ifloor(double v)
147
+ {
148
+ return int(floor(v));
149
+ }
150
+ AGG_INLINE unsigned ufloor(double v) //-------ufloor
151
+ {
152
+ return unsigned(floor(v));
153
+ }
154
+ AGG_INLINE int iceil(double v)
155
+ {
156
+ return int(ceil(v));
157
+ }
158
+ AGG_INLINE unsigned uceil(double v) //--------uceil
159
+ {
160
+ return unsigned(ceil(v));
161
+ }
162
+ #elif defined(AGG_QIFIST)
163
+ AGG_INLINE int iround(double v)
164
+ {
165
+ return int(v);
166
+ }
167
+ AGG_INLINE int uround(double v)
168
+ {
169
+ return unsigned(v);
170
+ }
171
+ AGG_INLINE int ifloor(double v)
172
+ {
173
+ return int(floor(v));
174
+ }
175
+ AGG_INLINE unsigned ufloor(double v)
176
+ {
177
+ return unsigned(floor(v));
178
+ }
179
+ AGG_INLINE int iceil(double v)
180
+ {
181
+ return int(ceil(v));
182
+ }
183
+ AGG_INLINE unsigned uceil(double v)
184
+ {
185
+ return unsigned(ceil(v));
186
+ }
187
+ #else
188
+ AGG_INLINE int iround(double v)
189
+ {
190
+ return int((v < 0.0) ? v - 0.5 : v + 0.5);
191
+ }
192
+ AGG_INLINE int uround(double v)
193
+ {
194
+ return unsigned(v + 0.5);
195
+ }
196
+ AGG_INLINE int ifloor(double v)
197
+ {
198
+ int i = int(v);
199
+ return i - (i > v);
200
+ }
201
+ AGG_INLINE unsigned ufloor(double v)
202
+ {
203
+ return unsigned(v);
204
+ }
205
+ AGG_INLINE int iceil(double v)
206
+ {
207
+ return int(ceil(v));
208
+ }
209
+ AGG_INLINE unsigned uceil(double v)
210
+ {
211
+ return unsigned(ceil(v));
212
+ }
213
+ #endif
214
+
215
+ //---------------------------------------------------------------saturation
216
+ template<int Limit> struct saturation
217
+ {
218
+ AGG_INLINE static int iround(double v)
219
+ {
220
+ if(v < double(-Limit)) return -Limit;
221
+ if(v > double( Limit)) return Limit;
222
+ return agg::iround(v);
223
+ }
224
+ };
225
+
226
+ //------------------------------------------------------------------mul_one
227
+ template<unsigned Shift> struct mul_one
228
+ {
229
+ AGG_INLINE static unsigned mul(unsigned a, unsigned b)
230
+ {
231
+ unsigned q = a * b + (1 << (Shift-1));
232
+ return (q + (q >> Shift)) >> Shift;
233
+ }
234
+ };
235
+
236
+ //-------------------------------------------------------------------------
237
+ typedef unsigned char cover_type; //----cover_type
238
+ enum cover_scale_e
239
+ {
240
+ cover_shift = 8, //----cover_shift
241
+ cover_size = 1 << cover_shift, //----cover_size
242
+ cover_mask = cover_size - 1, //----cover_mask
243
+ cover_none = 0, //----cover_none
244
+ cover_full = cover_mask //----cover_full
245
+ };
246
+
247
+ //----------------------------------------------------poly_subpixel_scale_e
248
+ // These constants determine the subpixel accuracy, to be more precise,
249
+ // the number of bits of the fractional part of the coordinates.
250
+ // The possible coordinate capacity in bits can be calculated by formula:
251
+ // sizeof(int) * 8 - poly_subpixel_shift, i.e., for 32-bit integers and
252
+ // 8-bits fractional part the capacity is 24 bits.
253
+ enum poly_subpixel_scale_e
254
+ {
255
+ poly_subpixel_shift = 8, //----poly_subpixel_shift
256
+ poly_subpixel_scale = 1<<poly_subpixel_shift, //----poly_subpixel_scale
257
+ poly_subpixel_mask = poly_subpixel_scale-1 //----poly_subpixel_mask
258
+ };
259
+
260
+ //----------------------------------------------------------filling_rule_e
261
+ enum filling_rule_e
262
+ {
263
+ fill_non_zero,
264
+ fill_even_odd
265
+ };
266
+
267
+ //-----------------------------------------------------------------------pi
268
+ const double pi = 3.14159265358979323846;
269
+
270
+ //------------------------------------------------------------------deg2rad
271
+ inline double deg2rad(double deg)
272
+ {
273
+ return deg * pi / 180.0;
274
+ }
275
+
276
+ //------------------------------------------------------------------rad2deg
277
+ inline double rad2deg(double rad)
278
+ {
279
+ return rad * 180.0 / pi;
280
+ }
281
+
282
+ //----------------------------------------------------------------rect_base
283
+ template<class T> struct rect_base
284
+ {
285
+ typedef T value_type;
286
+ typedef rect_base<T> self_type;
287
+ T x1, y1, x2, y2;
288
+
289
+ rect_base() {}
290
+ rect_base(T x1_, T y1_, T x2_, T y2_) :
291
+ x1(x1_), y1(y1_), x2(x2_), y2(y2_) {}
292
+
293
+ void init(T x1_, T y1_, T x2_, T y2_)
294
+ {
295
+ x1 = x1_; y1 = y1_; x2 = x2_; y2 = y2_;
296
+ }
297
+
298
+ const self_type& normalize()
299
+ {
300
+ T t;
301
+ if(x1 > x2) { t = x1; x1 = x2; x2 = t; }
302
+ if(y1 > y2) { t = y1; y1 = y2; y2 = t; }
303
+ return *this;
304
+ }
305
+
306
+ bool clip(const self_type& r)
307
+ {
308
+ if(x2 > r.x2) x2 = r.x2;
309
+ if(y2 > r.y2) y2 = r.y2;
310
+ if(x1 < r.x1) x1 = r.x1;
311
+ if(y1 < r.y1) y1 = r.y1;
312
+ return x1 <= x2 && y1 <= y2;
313
+ }
314
+
315
+ bool is_valid() const
316
+ {
317
+ return x1 <= x2 && y1 <= y2;
318
+ }
319
+
320
+ bool hit_test(T x, T y) const
321
+ {
322
+ return (x >= x1 && x <= x2 && y >= y1 && y <= y2);
323
+ }
324
+
325
+ bool overlaps(const self_type& r) const
326
+ {
327
+ return !(r.x1 > x2 || r.x2 < x1
328
+ || r.y1 > y2 || r.y2 < y1);
329
+ }
330
+ };
331
+
332
+ //-----------------------------------------------------intersect_rectangles
333
+ template<class Rect>
334
+ inline Rect intersect_rectangles(const Rect& r1, const Rect& r2)
335
+ {
336
+ Rect r = r1;
337
+
338
+ // First process x2,y2 because the other order
339
+ // results in Internal Compiler Error under
340
+ // Microsoft Visual C++ .NET 2003 69462-335-0000007-18038 in
341
+ // case of "Maximize Speed" optimization option.
342
+ //-----------------
343
+ if(r.x2 > r2.x2) r.x2 = r2.x2;
344
+ if(r.y2 > r2.y2) r.y2 = r2.y2;
345
+ if(r.x1 < r2.x1) r.x1 = r2.x1;
346
+ if(r.y1 < r2.y1) r.y1 = r2.y1;
347
+ return r;
348
+ }
349
+
350
+
351
+ //---------------------------------------------------------unite_rectangles
352
+ template<class Rect>
353
+ inline Rect unite_rectangles(const Rect& r1, const Rect& r2)
354
+ {
355
+ Rect r = r1;
356
+ if(r.x2 < r2.x2) r.x2 = r2.x2;
357
+ if(r.y2 < r2.y2) r.y2 = r2.y2;
358
+ if(r.x1 > r2.x1) r.x1 = r2.x1;
359
+ if(r.y1 > r2.y1) r.y1 = r2.y1;
360
+ return r;
361
+ }
362
+
363
+ typedef rect_base<int> rect_i; //----rect_i
364
+ typedef rect_base<float> rect_f; //----rect_f
365
+ typedef rect_base<double> rect_d; //----rect_d
366
+
367
+ //---------------------------------------------------------path_commands_e
368
+ enum path_commands_e
369
+ {
370
+ path_cmd_stop = 0, //----path_cmd_stop
371
+ path_cmd_move_to = 1, //----path_cmd_move_to
372
+ path_cmd_line_to = 2, //----path_cmd_line_to
373
+ path_cmd_curve3 = 3, //----path_cmd_curve3
374
+ path_cmd_curve4 = 4, //----path_cmd_curve4
375
+ path_cmd_curveN = 5, //----path_cmd_curveN
376
+ path_cmd_catrom = 6, //----path_cmd_catrom
377
+ path_cmd_ubspline = 7, //----path_cmd_ubspline
378
+ path_cmd_end_poly = 0x0F, //----path_cmd_end_poly
379
+ path_cmd_mask = 0x0F //----path_cmd_mask
380
+ };
381
+
382
+ //------------------------------------------------------------path_flags_e
383
+ enum path_flags_e
384
+ {
385
+ path_flags_none = 0, //----path_flags_none
386
+ path_flags_ccw = 0x10, //----path_flags_ccw
387
+ path_flags_cw = 0x20, //----path_flags_cw
388
+ path_flags_close = 0x40, //----path_flags_close
389
+ path_flags_mask = 0xF0 //----path_flags_mask
390
+ };
391
+
392
+ //---------------------------------------------------------------is_vertex
393
+ inline bool is_vertex(unsigned c)
394
+ {
395
+ return c >= path_cmd_move_to && c < path_cmd_end_poly;
396
+ }
397
+
398
+ //--------------------------------------------------------------is_drawing
399
+ inline bool is_drawing(unsigned c)
400
+ {
401
+ return c >= path_cmd_line_to && c < path_cmd_end_poly;
402
+ }
403
+
404
+ //-----------------------------------------------------------------is_stop
405
+ inline bool is_stop(unsigned c)
406
+ {
407
+ return c == path_cmd_stop;
408
+ }
409
+
410
+ //--------------------------------------------------------------is_move_to
411
+ inline bool is_move_to(unsigned c)
412
+ {
413
+ return c == path_cmd_move_to;
414
+ }
415
+
416
+ //--------------------------------------------------------------is_line_to
417
+ inline bool is_line_to(unsigned c)
418
+ {
419
+ return c == path_cmd_line_to;
420
+ }
421
+
422
+ //----------------------------------------------------------------is_curve
423
+ inline bool is_curve(unsigned c)
424
+ {
425
+ return c == path_cmd_curve3 || c == path_cmd_curve4;
426
+ }
427
+
428
+ //---------------------------------------------------------------is_curve3
429
+ inline bool is_curve3(unsigned c)
430
+ {
431
+ return c == path_cmd_curve3;
432
+ }
433
+
434
+ //---------------------------------------------------------------is_curve4
435
+ inline bool is_curve4(unsigned c)
436
+ {
437
+ return c == path_cmd_curve4;
438
+ }
439
+
440
+ //-------------------------------------------------------------is_end_poly
441
+ inline bool is_end_poly(unsigned c)
442
+ {
443
+ return (c & path_cmd_mask) == path_cmd_end_poly;
444
+ }
445
+
446
+ //----------------------------------------------------------------is_close
447
+ inline bool is_close(unsigned c)
448
+ {
449
+ return (c & ~(path_flags_cw | path_flags_ccw)) ==
450
+ (path_cmd_end_poly | path_flags_close);
451
+ }
452
+
453
+ //------------------------------------------------------------is_next_poly
454
+ inline bool is_next_poly(unsigned c)
455
+ {
456
+ return is_stop(c) || is_move_to(c) || is_end_poly(c);
457
+ }
458
+
459
+ //-------------------------------------------------------------------is_cw
460
+ inline bool is_cw(unsigned c)
461
+ {
462
+ return (c & path_flags_cw) != 0;
463
+ }
464
+
465
+ //------------------------------------------------------------------is_ccw
466
+ inline bool is_ccw(unsigned c)
467
+ {
468
+ return (c & path_flags_ccw) != 0;
469
+ }
470
+
471
+ //-------------------------------------------------------------is_oriented
472
+ inline bool is_oriented(unsigned c)
473
+ {
474
+ return (c & (path_flags_cw | path_flags_ccw)) != 0;
475
+ }
476
+
477
+ //---------------------------------------------------------------is_closed
478
+ inline bool is_closed(unsigned c)
479
+ {
480
+ return (c & path_flags_close) != 0;
481
+ }
482
+
483
+ //----------------------------------------------------------get_close_flag
484
+ inline unsigned get_close_flag(unsigned c)
485
+ {
486
+ return c & path_flags_close;
487
+ }
488
+
489
+ //-------------------------------------------------------clear_orientation
490
+ inline unsigned clear_orientation(unsigned c)
491
+ {
492
+ return c & ~(path_flags_cw | path_flags_ccw);
493
+ }
494
+
495
+ //---------------------------------------------------------get_orientation
496
+ inline unsigned get_orientation(unsigned c)
497
+ {
498
+ return c & (path_flags_cw | path_flags_ccw);
499
+ }
500
+
501
+ //---------------------------------------------------------set_orientation
502
+ inline unsigned set_orientation(unsigned c, unsigned o)
503
+ {
504
+ return clear_orientation(c) | o;
505
+ }
506
+
507
+ //--------------------------------------------------------------point_base
508
+ template<class T> struct point_base
509
+ {
510
+ typedef T value_type;
511
+ T x,y;
512
+ point_base() {}
513
+ point_base(T x_, T y_) : x(x_), y(y_) {}
514
+ };
515
+ typedef point_base<int> point_i; //-----point_i
516
+ typedef point_base<float> point_f; //-----point_f
517
+ typedef point_base<double> point_d; //-----point_d
518
+
519
+ //-------------------------------------------------------------vertex_base
520
+ template<class T> struct vertex_base
521
+ {
522
+ typedef T value_type;
523
+ T x,y;
524
+ unsigned cmd;
525
+ vertex_base() {}
526
+ vertex_base(T x_, T y_, unsigned cmd_) : x(x_), y(y_), cmd(cmd_) {}
527
+ };
528
+ typedef vertex_base<int> vertex_i; //-----vertex_i
529
+ typedef vertex_base<float> vertex_f; //-----vertex_f
530
+ typedef vertex_base<double> vertex_d; //-----vertex_d
531
+
532
+ //----------------------------------------------------------------row_info
533
+ template<class T> struct row_info
534
+ {
535
+ int x1, x2;
536
+ T* ptr;
537
+ row_info() {}
538
+ row_info(int x1_, int x2_, T* ptr_) : x1(x1_), x2(x2_), ptr(ptr_) {}
539
+ };
540
+
541
+ //----------------------------------------------------------const_row_info
542
+ template<class T> struct const_row_info
543
+ {
544
+ int x1, x2;
545
+ const T* ptr;
546
+ const_row_info() {}
547
+ const_row_info(int x1_, int x2_, const T* ptr_) :
548
+ x1(x1_), x2(x2_), ptr(ptr_) {}
549
+ };
550
+
551
+ //------------------------------------------------------------is_equal_eps
552
+ template<class T> inline bool is_equal_eps(T v1, T v2, T epsilon)
553
+ {
554
+ bool neg1 = v1 < 0.0;
555
+ bool neg2 = v2 < 0.0;
556
+
557
+ if (neg1 != neg2)
558
+ return std::fabs(v1) < epsilon && std::fabs(v2) < epsilon;
559
+
560
+ int int1, int2;
561
+ std::frexp(v1, &int1);
562
+ std::frexp(v2, &int2);
563
+ int min12 = int1 < int2 ? int1 : int2;
564
+
565
+ v1 = std::ldexp(v1, -min12);
566
+ v2 = std::ldexp(v2, -min12);
567
+
568
+ return std::fabs(v1 - v2) < epsilon;
569
+ }
570
+ }
571
+
572
+
573
+ #endif
574
+
data/bundled_deps/agg/agg/agg_bezier_arc.h ADDED
@@ -0,0 +1,159 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ //
16
+ // Arc generator. Produces at most 4 consecutive cubic bezier curves, i.e.,
17
+ // 4, 7, 10, or 13 vertices.
18
+ //
19
+ //----------------------------------------------------------------------------
20
+
21
+ #ifndef AGG_BEZIER_ARC_INCLUDED
22
+ #define AGG_BEZIER_ARC_INCLUDED
23
+
24
+ #include "agg_conv_transform.h"
25
+
26
+ namespace agg
27
+ {
28
+
29
+ //-----------------------------------------------------------------------
30
+ void arc_to_bezier(double cx, double cy, double rx, double ry,
31
+ double start_angle, double sweep_angle,
32
+ double* curve);
33
+
34
+
35
+ //==============================================================bezier_arc
36
+ //
37
+ // See implementation in agg_bezier_arc.cpp
38
+ //
39
+ class bezier_arc
40
+ {
41
+ public:
42
+ //--------------------------------------------------------------------
43
+ bezier_arc() : m_vertex(26), m_num_vertices(0), m_cmd(path_cmd_line_to) {}
44
+ bezier_arc(double x, double y,
45
+ double rx, double ry,
46
+ double start_angle,
47
+ double sweep_angle)
48
+ {
49
+ init(x, y, rx, ry, start_angle, sweep_angle);
50
+ }
51
+
52
+ //--------------------------------------------------------------------
53
+ void init(double x, double y,
54
+ double rx, double ry,
55
+ double start_angle,
56
+ double sweep_angle);
57
+
58
+ //--------------------------------------------------------------------
59
+ void rewind(unsigned)
60
+ {
61
+ m_vertex = 0;
62
+ }
63
+
64
+ //--------------------------------------------------------------------
65
+ unsigned vertex(double* x, double* y)
66
+ {
67
+ if(m_vertex >= m_num_vertices) return path_cmd_stop;
68
+ *x = m_vertices[m_vertex];
69
+ *y = m_vertices[m_vertex + 1];
70
+ m_vertex += 2;
71
+ return (m_vertex == 2) ? unsigned(path_cmd_move_to) : m_cmd;
72
+ }
73
+
74
+ // Supplementary functions. num_vertices() actually returns the doubled
75
+ // number of vertices. That is, for 1 vertex it returns 2.
76
+ //--------------------------------------------------------------------
77
+ unsigned num_vertices() const { return m_num_vertices; }
78
+ const double* vertices() const { return m_vertices; }
79
+ double* vertices() { return m_vertices; }
80
+
81
+ private:
82
+ unsigned m_vertex;
83
+ unsigned m_num_vertices;
84
+ double m_vertices[26];
85
+ unsigned m_cmd;
86
+ };
87
+
88
+
89
+
90
+ //==========================================================bezier_arc_svg
91
+ // Compute an SVG-style bezier arc.
92
+ //
93
+ // Computes an elliptical arc from (x1, y1) to (x2, y2). The size and
94
+ // orientation of the ellipse are defined by two radii (rx, ry)
95
+ // and an x-axis-rotation, which indicates how the ellipse as a whole
96
+ // is rotated relative to the current coordinate system. The center
97
+ // (cx, cy) of the ellipse is calculated automatically to satisfy the
98
+ // constraints imposed by the other parameters.
99
+ // large-arc-flag and sweep-flag contribute to the automatic calculations
100
+ // and help determine how the arc is drawn.
101
+ class bezier_arc_svg
102
+ {
103
+ public:
104
+ //--------------------------------------------------------------------
105
+ bezier_arc_svg() : m_arc(), m_radii_ok(false) {}
106
+
107
+ bezier_arc_svg(double x1, double y1,
108
+ double rx, double ry,
109
+ double angle,
110
+ bool large_arc_flag,
111
+ bool sweep_flag,
112
+ double x2, double y2) :
113
+ m_arc(), m_radii_ok(false)
114
+ {
115
+ init(x1, y1, rx, ry, angle, large_arc_flag, sweep_flag, x2, y2);
116
+ }
117
+
118
+ //--------------------------------------------------------------------
119
+ void init(double x1, double y1,
120
+ double rx, double ry,
121
+ double angle,
122
+ bool large_arc_flag,
123
+ bool sweep_flag,
124
+ double x2, double y2);
125
+
126
+ //--------------------------------------------------------------------
127
+ bool radii_ok() const { return m_radii_ok; }
128
+
129
+ //--------------------------------------------------------------------
130
+ void rewind(unsigned)
131
+ {
132
+ m_arc.rewind(0);
133
+ }
134
+
135
+ //--------------------------------------------------------------------
136
+ unsigned vertex(double* x, double* y)
137
+ {
138
+ return m_arc.vertex(x, y);
139
+ }
140
+
141
+ // Supplementary functions. num_vertices() actually returns the doubled
142
+ // number of vertices. That is, for 1 vertex it returns 2.
143
+ //--------------------------------------------------------------------
144
+ unsigned num_vertices() const { return m_arc.num_vertices(); }
145
+ const double* vertices() const { return m_arc.vertices(); }
146
+ double* vertices() { return m_arc.vertices(); }
147
+
148
+ private:
149
+ bezier_arc m_arc;
150
+ bool m_radii_ok;
151
+ };
152
+
153
+
154
+
155
+
156
+ }
157
+
158
+
159
+ #endif
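
A minimal usage sketch for the header above (an editorial illustration, not part of the upstream diff): bezier_arc is consumed like any other AGG vertex source, by rewinding it and pulling vertices until path_cmd_stop; the center, radii, and angles below are arbitrary example values.

    #include <cstdio>
    #include "agg_bezier_arc.h"

    // Dump the control points of a quarter-circle arc. The first vertex comes
    // back tagged path_cmd_move_to; the remaining ones are the cubic Bezier
    // control points produced for the arc.
    void dump_quarter_arc()
    {
        agg::bezier_arc arc(100.0, 100.0,             // center (cx, cy)
                            50.0, 50.0,               // radii (rx, ry)
                            0.0, 1.5707963267948966); // start angle, sweep angle
        arc.rewind(0);

        double x, y;
        unsigned cmd;
        while ((cmd = arc.vertex(&x, &y)) != agg::path_cmd_stop)
            std::printf("cmd=%u  (%g, %g)\n", cmd, x, y);
    }
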
data/bundled_deps/agg/agg/agg_clip_liang_barsky.h ADDED
@@ -0,0 +1,333 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ //
16
+ // Liang-Barsky clipping
17
+ //
18
+ //----------------------------------------------------------------------------
19
+ #ifndef AGG_CLIP_LIANG_BARSKY_INCLUDED
20
+ #define AGG_CLIP_LIANG_BARSKY_INCLUDED
21
+
22
+ #include "agg_basics.h"
23
+
24
+ namespace agg
25
+ {
26
+
27
+ //------------------------------------------------------------------------
28
+ enum clipping_flags_e
29
+ {
30
+ clipping_flags_x1_clipped = 4,
31
+ clipping_flags_x2_clipped = 1,
32
+ clipping_flags_y1_clipped = 8,
33
+ clipping_flags_y2_clipped = 2,
34
+ clipping_flags_x_clipped = clipping_flags_x1_clipped | clipping_flags_x2_clipped,
35
+ clipping_flags_y_clipped = clipping_flags_y1_clipped | clipping_flags_y2_clipped
36
+ };
37
+
38
+ //----------------------------------------------------------clipping_flags
39
+ // Determine the clipping code of the vertex according to the
40
+ // Cyrus-Beck line clipping algorithm
41
+ //
42
+ // | |
43
+ // 0110 | 0010 | 0011
44
+ // | |
45
+ // -------+--------+-------- clip_box.y2
46
+ // | |
47
+ // 0100 | 0000 | 0001
48
+ // | |
49
+ // -------+--------+-------- clip_box.y1
50
+ // | |
51
+ // 1100 | 1000 | 1001
52
+ // | |
53
+ // clip_box.x1 clip_box.x2
54
+ //
55
+ //
56
+ template<class T>
57
+ inline unsigned clipping_flags(T x, T y, const rect_base<T>& clip_box)
58
+ {
59
+ return (x > clip_box.x2) |
60
+ ((y > clip_box.y2) << 1) |
61
+ ((x < clip_box.x1) << 2) |
62
+ ((y < clip_box.y1) << 3);
63
+ }
64
+
65
+ //--------------------------------------------------------clipping_flags_x
66
+ template<class T>
67
+ inline unsigned clipping_flags_x(T x, const rect_base<T>& clip_box)
68
+ {
69
+ return (x > clip_box.x2) | ((x < clip_box.x1) << 2);
70
+ }
71
+
72
+
73
+ //--------------------------------------------------------clipping_flags_y
74
+ template<class T>
75
+ inline unsigned clipping_flags_y(T y, const rect_base<T>& clip_box)
76
+ {
77
+ return ((y > clip_box.y2) << 1) | ((y < clip_box.y1) << 3);
78
+ }
79
+
80
+
81
+ //-------------------------------------------------------clip_liang_barsky
82
+ template<class T>
83
+ inline unsigned clip_liang_barsky(T x1, T y1, T x2, T y2,
84
+ const rect_base<T>& clip_box,
85
+ T* x, T* y)
86
+ {
87
+ const double nearzero = 1e-30;
88
+
89
+ double deltax = x2 - x1;
90
+ double deltay = y2 - y1;
91
+ double xin;
92
+ double xout;
93
+ double yin;
94
+ double yout;
95
+ double tinx;
96
+ double tiny;
97
+ double toutx;
98
+ double touty;
99
+ double tin1;
100
+ double tin2;
101
+ double tout1;
102
+ unsigned np = 0;
103
+
104
+ if(deltax == 0.0)
105
+ {
106
+ // bump off of the vertical
107
+ deltax = (x1 > clip_box.x1) ? -nearzero : nearzero;
108
+ }
109
+
110
+ if(deltay == 0.0)
111
+ {
112
+ // bump off of the horizontal
113
+ deltay = (y1 > clip_box.y1) ? -nearzero : nearzero;
114
+ }
115
+
116
+ if(deltax > 0.0)
117
+ {
118
+ // points to right
119
+ xin = clip_box.x1;
120
+ xout = clip_box.x2;
121
+ }
122
+ else
123
+ {
124
+ xin = clip_box.x2;
125
+ xout = clip_box.x1;
126
+ }
127
+
128
+ if(deltay > 0.0)
129
+ {
130
+ // points up
131
+ yin = clip_box.y1;
132
+ yout = clip_box.y2;
133
+ }
134
+ else
135
+ {
136
+ yin = clip_box.y2;
137
+ yout = clip_box.y1;
138
+ }
139
+
140
+ tinx = (xin - x1) / deltax;
141
+ tiny = (yin - y1) / deltay;
142
+
143
+ if (tinx < tiny)
144
+ {
145
+ // hits x first
146
+ tin1 = tinx;
147
+ tin2 = tiny;
148
+ }
149
+ else
150
+ {
151
+ // hits y first
152
+ tin1 = tiny;
153
+ tin2 = tinx;
154
+ }
155
+
156
+ if(tin1 <= 1.0)
157
+ {
158
+ if(0.0 < tin1)
159
+ {
160
+ *x++ = (T)xin;
161
+ *y++ = (T)yin;
162
+ ++np;
163
+ }
164
+
165
+ if(tin2 <= 1.0)
166
+ {
167
+ toutx = (xout - x1) / deltax;
168
+ touty = (yout - y1) / deltay;
169
+
170
+ tout1 = (toutx < touty) ? toutx : touty;
171
+
172
+ if(tin2 > 0.0 || tout1 > 0.0)
173
+ {
174
+ if(tin2 <= tout1)
175
+ {
176
+ if(tin2 > 0.0)
177
+ {
178
+ if(tinx > tiny)
179
+ {
180
+ *x++ = (T)xin;
181
+ *y++ = (T)(y1 + tinx * deltay);
182
+ }
183
+ else
184
+ {
185
+ *x++ = (T)(x1 + tiny * deltax);
186
+ *y++ = (T)yin;
187
+ }
188
+ ++np;
189
+ }
190
+
191
+ if(tout1 < 1.0)
192
+ {
193
+ if(toutx < touty)
194
+ {
195
+ *x++ = (T)xout;
196
+ *y++ = (T)(y1 + toutx * deltay);
197
+ }
198
+ else
199
+ {
200
+ *x++ = (T)(x1 + touty * deltax);
201
+ *y++ = (T)yout;
202
+ }
203
+ }
204
+ else
205
+ {
206
+ *x++ = x2;
207
+ *y++ = y2;
208
+ }
209
+ ++np;
210
+ }
211
+ else
212
+ {
213
+ if(tinx > tiny)
214
+ {
215
+ *x++ = (T)xin;
216
+ *y++ = (T)yout;
217
+ }
218
+ else
219
+ {
220
+ *x++ = (T)xout;
221
+ *y++ = (T)yin;
222
+ }
223
+ ++np;
224
+ }
225
+ }
226
+ }
227
+ }
228
+ return np;
229
+ }
230
+
231
+
232
+ //----------------------------------------------------------------------------
233
+ template<class T>
234
+ bool clip_move_point(T x1, T y1, T x2, T y2,
235
+ const rect_base<T>& clip_box,
236
+ T* x, T* y, unsigned flags)
237
+ {
238
+ T bound;
239
+
240
+ if(flags & clipping_flags_x_clipped)
241
+ {
242
+ if(x1 == x2)
243
+ {
244
+ return false;
245
+ }
246
+ bound = (flags & clipping_flags_x1_clipped) ? clip_box.x1 : clip_box.x2;
247
+ *y = (T)(double(bound - x1) * (y2 - y1) / (x2 - x1) + y1);
248
+ *x = bound;
249
+ }
250
+
251
+ flags = clipping_flags_y(*y, clip_box);
252
+ if(flags & clipping_flags_y_clipped)
253
+ {
254
+ if(y1 == y2)
255
+ {
256
+ return false;
257
+ }
258
+ bound = (flags & clipping_flags_y1_clipped) ? clip_box.y1 : clip_box.y2;
259
+ *x = (T)(double(bound - y1) * (x2 - x1) / (y2 - y1) + x1);
260
+ *y = bound;
261
+ }
262
+ return true;
263
+ }
264
+
265
+ //-------------------------------------------------------clip_line_segment
266
+ // Returns: ret >= 4 - Fully clipped
267
+ // (ret & 1) != 0 - First point has been moved
268
+ // (ret & 2) != 0 - Second point has been moved
269
+ //
270
+ template<class T>
271
+ unsigned clip_line_segment(T* x1, T* y1, T* x2, T* y2,
272
+ const rect_base<T>& clip_box)
273
+ {
274
+ unsigned f1 = clipping_flags(*x1, *y1, clip_box);
275
+ unsigned f2 = clipping_flags(*x2, *y2, clip_box);
276
+ unsigned ret = 0;
277
+
278
+ if((f2 | f1) == 0)
279
+ {
280
+ // Fully visible
281
+ return 0;
282
+ }
283
+
284
+ if((f1 & clipping_flags_x_clipped) != 0 &&
285
+ (f1 & clipping_flags_x_clipped) == (f2 & clipping_flags_x_clipped))
286
+ {
287
+ // Fully clipped
288
+ return 4;
289
+ }
290
+
291
+ if((f1 & clipping_flags_y_clipped) != 0 &&
292
+ (f1 & clipping_flags_y_clipped) == (f2 & clipping_flags_y_clipped))
293
+ {
294
+ // Fully clipped
295
+ return 4;
296
+ }
297
+
298
+ T tx1 = *x1;
299
+ T ty1 = *y1;
300
+ T tx2 = *x2;
301
+ T ty2 = *y2;
302
+ if(f1)
303
+ {
304
+ if(!clip_move_point(tx1, ty1, tx2, ty2, clip_box, x1, y1, f1))
305
+ {
306
+ return 4;
307
+ }
308
+ if(*x1 == *x2 && *y1 == *y2)
309
+ {
310
+ return 4;
311
+ }
312
+ ret |= 1;
313
+ }
314
+ if(f2)
315
+ {
316
+ if(!clip_move_point(tx1, ty1, tx2, ty2, clip_box, x2, y2, f2))
317
+ {
318
+ return 4;
319
+ }
320
+ if(*x1 == *x2 && *y1 == *y2)
321
+ {
322
+ return 4;
323
+ }
324
+ ret |= 2;
325
+ }
326
+ return ret;
327
+ }
328
+
329
+
330
+ }
331
+
332
+
333
+ #endif
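
A small sketch exercising the functions above (an editorial illustration, not part of the upstream diff): clip_line_segment() follows the return convention documented in the header, where ret >= 4 means the segment lies entirely outside the box, bit 0 means the first endpoint was moved, and bit 1 means the second endpoint was moved; the coordinates are arbitrary example values.

    #include <cstdio>
    #include "agg_clip_liang_barsky.h"

    void clip_demo()
    {
        // Clip box spanning (0,0)-(100,100) and a horizontal segment that
        // crosses it completely, so both endpoints should be moved.
        agg::rect_base<double> box(0.0, 0.0, 100.0, 100.0);
        double x1 = -50.0, y1 = 50.0, x2 = 150.0, y2 = 50.0;

        unsigned ret = agg::clip_line_segment(&x1, &y1, &x2, &y2, box);
        if (ret >= 4)
            std::printf("fully clipped\n");
        else
            std::printf("(%g,%g)-(%g,%g), first moved=%u, second moved=%u\n",
                        x1, y1, x2, y2, ret & 1, (ret >> 1) & 1);
    }
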
data/bundled_deps/agg/agg/agg_color_gray.h ADDED
@@ -0,0 +1,1047 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ //
16
+ // Adaptation for high precision colors has been sponsored by
17
+ // Liberty Technology Systems, Inc., visit http://lib-sys.com
18
+ //
19
+ // Liberty Technology Systems, Inc. is the provider of
20
+ // PostScript and PDF technology for software developers.
21
+ //
22
+ //----------------------------------------------------------------------------
23
+ //
24
+ // color types gray8, gray16
25
+ //
26
+ //----------------------------------------------------------------------------
27
+
28
+ #ifndef AGG_COLOR_GRAY_INCLUDED
29
+ #define AGG_COLOR_GRAY_INCLUDED
30
+
31
+ #include "agg_basics.h"
32
+ #include "agg_color_rgba.h"
33
+
34
+ namespace agg
35
+ {
36
+
37
+ //===================================================================gray8
38
+ template<class Colorspace>
39
+ struct gray8T
40
+ {
41
+ typedef int8u value_type;
42
+ typedef int32u calc_type;
43
+ typedef int32 long_type;
44
+ enum base_scale_e
45
+ {
46
+ base_shift = 8,
47
+ base_scale = 1 << base_shift,
48
+ base_mask = base_scale - 1,
49
+ base_MSB = 1 << (base_shift - 1)
50
+ };
51
+ typedef gray8T self_type;
52
+
53
+ value_type v;
54
+ value_type a;
55
+
56
+ static value_type luminance(const rgba& c)
57
+ {
58
+ // Calculate grayscale value as per ITU-R BT.709.
59
+ return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask));
60
+ }
61
+
62
+ static value_type luminance(const rgba8& c)
63
+ {
64
+ // Calculate grayscale value as per ITU-R BT.709.
65
+ return value_type((55u * c.r + 184u * c.g + 18u * c.b) >> 8);
66
+ }
67
+
68
+ static void convert(gray8T<linear>& dst, const gray8T<sRGB>& src)
69
+ {
70
+ dst.v = sRGB_conv<value_type>::rgb_from_sRGB(src.v);
71
+ dst.a = src.a;
72
+ }
73
+
74
+ static void convert(gray8T<sRGB>& dst, const gray8T<linear>& src)
75
+ {
76
+ dst.v = sRGB_conv<value_type>::rgb_to_sRGB(src.v);
77
+ dst.a = src.a;
78
+ }
79
+
80
+ static void convert(gray8T<linear>& dst, const rgba8& src)
81
+ {
82
+ dst.v = luminance(src);
83
+ dst.a = src.a;
84
+ }
85
+
86
+ static void convert(gray8T<linear>& dst, const srgba8& src)
87
+ {
88
+ // The RGB weights are only valid for linear values.
89
+ convert(dst, rgba8(src));
90
+ }
91
+
92
+ static void convert(gray8T<sRGB>& dst, const rgba8& src)
93
+ {
94
+ dst.v = sRGB_conv<value_type>::rgb_to_sRGB(luminance(src));
95
+ dst.a = src.a;
96
+ }
97
+
98
+ static void convert(gray8T<sRGB>& dst, const srgba8& src)
99
+ {
100
+ // The RGB weights are only valid for linear values.
101
+ convert(dst, rgba8(src));
102
+ }
103
+
104
+ //--------------------------------------------------------------------
105
+ gray8T() {}
106
+
107
+ //--------------------------------------------------------------------
108
+ explicit gray8T(unsigned v_, unsigned a_ = base_mask) :
109
+ v(int8u(v_)), a(int8u(a_)) {}
110
+
111
+ //--------------------------------------------------------------------
112
+ gray8T(const self_type& c, unsigned a_) :
113
+ v(c.v), a(value_type(a_)) {}
114
+
115
+ //--------------------------------------------------------------------
116
+ gray8T(const rgba& c) :
117
+ v(luminance(c)),
118
+ a(value_type(uround(c.a * base_mask))) {}
119
+
120
+ //--------------------------------------------------------------------
121
+ template<class T>
122
+ gray8T(const gray8T<T>& c)
123
+ {
124
+ convert(*this, c);
125
+ }
126
+
127
+ //--------------------------------------------------------------------
128
+ template<class T>
129
+ gray8T(const rgba8T<T>& c)
130
+ {
131
+ convert(*this, c);
132
+ }
133
+
134
+ //--------------------------------------------------------------------
135
+ template<class T>
136
+ T convert_from_sRGB() const
137
+ {
138
+ typename T::value_type y = sRGB_conv<typename T::value_type>::rgb_from_sRGB(v);
139
+ return T(y, y, y, sRGB_conv<typename T::value_type>::alpha_from_sRGB(a));
140
+ }
141
+
142
+ template<class T>
143
+ T convert_to_sRGB() const
144
+ {
145
+ typename T::value_type y = sRGB_conv<typename T::value_type>::rgb_to_sRGB(v);
146
+ return T(y, y, y, sRGB_conv<typename T::value_type>::alpha_to_sRGB(a));
147
+ }
148
+
149
+ //--------------------------------------------------------------------
150
+ rgba8 make_rgba8(const linear&) const
151
+ {
152
+ return rgba8(v, v, v, a);
153
+ }
154
+
155
+ rgba8 make_rgba8(const sRGB&) const
156
+ {
157
+ return convert_from_sRGB<srgba8>();
158
+ }
159
+
160
+ operator rgba8() const
161
+ {
162
+ return make_rgba8(Colorspace());
163
+ }
164
+
165
+ //--------------------------------------------------------------------
166
+ srgba8 make_srgba8(const linear&) const
167
+ {
168
+ return convert_to_sRGB<rgba8>();
169
+ }
170
+
171
+ srgba8 make_srgba8(const sRGB&) const
172
+ {
173
+ return srgba8(v, v, v, a);
174
+ }
175
+
176
+ operator srgba8() const
177
+ {
178
+ return make_rgba8(Colorspace());
179
+ }
180
+
181
+ //--------------------------------------------------------------------
182
+ rgba16 make_rgba16(const linear&) const
183
+ {
184
+ rgba16::value_type rgb = (v << 8) | v;
185
+ return rgba16(rgb, rgb, rgb, (a << 8) | a);
186
+ }
187
+
188
+ rgba16 make_rgba16(const sRGB&) const
189
+ {
190
+ return convert_from_sRGB<rgba16>();
191
+ }
192
+
193
+ operator rgba16() const
194
+ {
195
+ return make_rgba16(Colorspace());
196
+ }
197
+
198
+ //--------------------------------------------------------------------
199
+ rgba32 make_rgba32(const linear&) const
200
+ {
201
+ rgba32::value_type v32 = v / 255.0f;
202
+ return rgba32(v32, v32, v32, a / 255.0f);
203
+ }
204
+
205
+ rgba32 make_rgba32(const sRGB&) const
206
+ {
207
+ return convert_from_sRGB<rgba32>();
208
+ }
209
+
210
+ operator rgba32() const
211
+ {
212
+ return make_rgba32(Colorspace());
213
+ }
214
+
215
+ //--------------------------------------------------------------------
216
+ static AGG_INLINE double to_double(value_type a)
217
+ {
218
+ return double(a) / base_mask;
219
+ }
220
+
221
+ //--------------------------------------------------------------------
222
+ static AGG_INLINE value_type from_double(double a)
223
+ {
224
+ return value_type(uround(a * base_mask));
225
+ }
226
+
227
+ //--------------------------------------------------------------------
228
+ static AGG_INLINE value_type empty_value()
229
+ {
230
+ return 0;
231
+ }
232
+
233
+ //--------------------------------------------------------------------
234
+ static AGG_INLINE value_type full_value()
235
+ {
236
+ return base_mask;
237
+ }
238
+
239
+ //--------------------------------------------------------------------
240
+ AGG_INLINE bool is_transparent() const
241
+ {
242
+ return a == 0;
243
+ }
244
+
245
+ //--------------------------------------------------------------------
246
+ AGG_INLINE bool is_opaque() const
247
+ {
248
+ return a == base_mask;
249
+ }
250
+
251
+ //--------------------------------------------------------------------
252
+ // Fixed-point multiply, exact over int8u.
253
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
254
+ {
255
+ calc_type t = a * b + base_MSB;
256
+ return value_type(((t >> base_shift) + t) >> base_shift);
257
+ }
258
+
259
+ //--------------------------------------------------------------------
260
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
261
+ {
262
+ if (a * b == 0)
263
+ {
264
+ return 0;
265
+ }
266
+ else if (a >= b)
267
+ {
268
+ return base_mask;
269
+ }
270
+ else return value_type((a * base_mask + (b >> 1)) / b);
271
+ }
272
+
273
+ //--------------------------------------------------------------------
274
+ template<typename T>
275
+ static AGG_INLINE T downscale(T a)
276
+ {
277
+ return a >> base_shift;
278
+ }
279
+
280
+ //--------------------------------------------------------------------
281
+ template<typename T>
282
+ static AGG_INLINE T downshift(T a, unsigned n)
283
+ {
284
+ return a >> n;
285
+ }
286
+
287
+ //--------------------------------------------------------------------
288
+ // Fixed-point multiply, exact over int8u.
289
+ // Specifically for multiplying a color component by a cover.
290
+ static AGG_INLINE value_type mult_cover(value_type a, value_type b)
291
+ {
292
+ return multiply(a, b);
293
+ }
294
+
295
+ //--------------------------------------------------------------------
296
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
297
+ {
298
+ return multiply(b, a);
299
+ }
300
+
301
+ //--------------------------------------------------------------------
302
+ // Interpolate p to q by a, assuming q is premultiplied by a.
303
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
304
+ {
305
+ return p + q - multiply(p, a);
306
+ }
307
+
308
+ //--------------------------------------------------------------------
309
+ // Interpolate p to q by a.
310
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
311
+ {
312
+ int t = (q - p) * a + base_MSB - (p > q);
313
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
314
+ }
315
+
316
+ //--------------------------------------------------------------------
317
+ self_type& clear()
318
+ {
319
+ v = a = 0;
320
+ return *this;
321
+ }
322
+
323
+ //--------------------------------------------------------------------
324
+ self_type& transparent()
325
+ {
326
+ a = 0;
327
+ return *this;
328
+ }
329
+
330
+ //--------------------------------------------------------------------
331
+ self_type& opacity(double a_)
332
+ {
333
+ if (a_ < 0) a = 0;
334
+ else if (a_ > 1) a = 1;
335
+ else a = (value_type)uround(a_ * double(base_mask));
336
+ return *this;
337
+ }
338
+
339
+ //--------------------------------------------------------------------
340
+ double opacity() const
341
+ {
342
+ return double(a) / double(base_mask);
343
+ }
344
+
345
+ //--------------------------------------------------------------------
346
+ self_type& premultiply()
347
+ {
348
+ if (a < base_mask)
349
+ {
350
+ if (a == 0) v = 0;
351
+ else v = multiply(v, a);
352
+ }
353
+ return *this;
354
+ }
355
+
356
+ //--------------------------------------------------------------------
357
+ self_type& demultiply()
358
+ {
359
+ if (a < base_mask)
360
+ {
361
+ if (a == 0)
362
+ {
363
+ v = 0;
364
+ }
365
+ else
366
+ {
367
+ calc_type v_ = (calc_type(v) * base_mask) / a;
368
+ v = value_type((v_ > base_mask) ? (value_type)base_mask : v_);
369
+ }
370
+ }
371
+ return *this;
372
+ }
373
+
374
+ //--------------------------------------------------------------------
375
+ self_type gradient(self_type c, double k) const
376
+ {
377
+ self_type ret;
378
+ calc_type ik = uround(k * base_scale);
379
+ ret.v = lerp(v, c.v, ik);
380
+ ret.a = lerp(a, c.a, ik);
381
+ return ret;
382
+ }
383
+
384
+ //--------------------------------------------------------------------
385
+ AGG_INLINE void add(const self_type& c, unsigned cover)
386
+ {
387
+ calc_type cv, ca;
388
+ if (cover == cover_mask)
389
+ {
390
+ if (c.a == base_mask)
391
+ {
392
+ *this = c;
393
+ return;
394
+ }
395
+ else
396
+ {
397
+ cv = v + c.v;
398
+ ca = a + c.a;
399
+ }
400
+ }
401
+ else
402
+ {
403
+ cv = v + mult_cover(c.v, cover);
404
+ ca = a + mult_cover(c.a, cover);
405
+ }
406
+ v = (value_type)((cv > calc_type(base_mask)) ? calc_type(base_mask) : cv);
407
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
408
+ }
409
+
410
+ //--------------------------------------------------------------------
411
+ static self_type no_color() { return self_type(0,0); }
412
+ };
413
+
414
+ typedef gray8T<linear> gray8;
415
+ typedef gray8T<sRGB> sgray8;
416
+
417
+
418
+ //==================================================================gray16
419
+ struct gray16
420
+ {
421
+ typedef int16u value_type;
422
+ typedef int32u calc_type;
423
+ typedef int64 long_type;
424
+ enum base_scale_e
425
+ {
426
+ base_shift = 16,
427
+ base_scale = 1 << base_shift,
428
+ base_mask = base_scale - 1,
429
+ base_MSB = 1 << (base_shift - 1)
430
+ };
431
+ typedef gray16 self_type;
432
+
433
+ value_type v;
434
+ value_type a;
435
+
436
+ static value_type luminance(const rgba& c)
437
+ {
438
+ // Calculate grayscale value as per ITU-R BT.709.
439
+ return value_type(uround((0.2126 * c.r + 0.7152 * c.g + 0.0722 * c.b) * base_mask));
440
+ }
441
+
442
+ static value_type luminance(const rgba16& c)
443
+ {
444
+ // Calculate grayscale value as per ITU-R BT.709.
445
+ return value_type((13933u * c.r + 46872u * c.g + 4732u * c.b) >> 16);
446
+ }
447
+
448
+ static value_type luminance(const rgba8& c)
449
+ {
450
+ return luminance(rgba16(c));
451
+ }
452
+
453
+ static value_type luminance(const srgba8& c)
454
+ {
455
+ return luminance(rgba16(c));
456
+ }
457
+
458
+ static value_type luminance(const rgba32& c)
459
+ {
460
+ return luminance(rgba(c));
461
+ }
462
+
463
+ //--------------------------------------------------------------------
464
+ gray16() {}
465
+
466
+ //--------------------------------------------------------------------
467
+ explicit gray16(unsigned v_, unsigned a_ = base_mask) :
468
+ v(int16u(v_)), a(int16u(a_)) {}
469
+
470
+ //--------------------------------------------------------------------
471
+ gray16(const self_type& c, unsigned a_) :
472
+ v(c.v), a(value_type(a_)) {}
473
+
474
+ //--------------------------------------------------------------------
475
+ gray16(const rgba& c) :
476
+ v(luminance(c)),
477
+ a((value_type)uround(c.a * double(base_mask))) {}
478
+
479
+ //--------------------------------------------------------------------
480
+ gray16(const rgba8& c) :
481
+ v(luminance(c)),
482
+ a((value_type(c.a) << 8) | c.a) {}
483
+
484
+ //--------------------------------------------------------------------
485
+ gray16(const srgba8& c) :
486
+ v(luminance(c)),
487
+ a((value_type(c.a) << 8) | c.a) {}
488
+
489
+ //--------------------------------------------------------------------
490
+ gray16(const rgba16& c) :
491
+ v(luminance(c)),
492
+ a(c.a) {}
493
+
494
+ //--------------------------------------------------------------------
495
+ gray16(const gray8& c) :
496
+ v((value_type(c.v) << 8) | c.v),
497
+ a((value_type(c.a) << 8) | c.a) {}
498
+
499
+ //--------------------------------------------------------------------
500
+ gray16(const sgray8& c) :
501
+ v(sRGB_conv<value_type>::rgb_from_sRGB(c.v)),
502
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
503
+
504
+ //--------------------------------------------------------------------
505
+ operator rgba8() const
506
+ {
507
+ return rgba8(v >> 8, v >> 8, v >> 8, a >> 8);
508
+ }
509
+
510
+ //--------------------------------------------------------------------
511
+ operator srgba8() const
512
+ {
513
+ value_type y = sRGB_conv<value_type>::rgb_to_sRGB(v);
514
+ return srgba8(y, y, y, sRGB_conv<value_type>::alpha_to_sRGB(a));
515
+ }
516
+
517
+ //--------------------------------------------------------------------
518
+ operator rgba16() const
519
+ {
520
+ return rgba16(v, v, v, a);
521
+ }
522
+
523
+ //--------------------------------------------------------------------
524
+ operator rgba32() const
525
+ {
526
+ rgba32::value_type v32 = v / 65535.0f;
527
+ return rgba32(v32, v32, v32, a / 65535.0f);
528
+ }
529
+
530
+ //--------------------------------------------------------------------
531
+ operator gray8() const
532
+ {
533
+ return gray8(v >> 8, a >> 8);
534
+ }
535
+
536
+ //--------------------------------------------------------------------
537
+ operator sgray8() const
538
+ {
539
+ return sgray8(
540
+ sRGB_conv<value_type>::rgb_to_sRGB(v),
541
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
542
+ }
543
+
544
+ //--------------------------------------------------------------------
545
+ static AGG_INLINE double to_double(value_type a)
546
+ {
547
+ return double(a) / base_mask;
548
+ }
549
+
550
+ //--------------------------------------------------------------------
551
+ static AGG_INLINE value_type from_double(double a)
552
+ {
553
+ return value_type(uround(a * base_mask));
554
+ }
555
+
556
+ //--------------------------------------------------------------------
557
+ static AGG_INLINE value_type empty_value()
558
+ {
559
+ return 0;
560
+ }
561
+
562
+ //--------------------------------------------------------------------
563
+ static AGG_INLINE value_type full_value()
564
+ {
565
+ return base_mask;
566
+ }
567
+
568
+ //--------------------------------------------------------------------
569
+ AGG_INLINE bool is_transparent() const
570
+ {
571
+ return a == 0;
572
+ }
573
+
574
+ //--------------------------------------------------------------------
575
+ AGG_INLINE bool is_opaque() const
576
+ {
577
+ return a == base_mask;
578
+ }
579
+
580
+ //--------------------------------------------------------------------
581
+ // Fixed-point multiply, exact over int16u.
582
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
583
+ {
584
+ calc_type t = a * b + base_MSB;
585
+ return value_type(((t >> base_shift) + t) >> base_shift);
586
+ }
587
+
588
+ //--------------------------------------------------------------------
589
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
590
+ {
591
+ if (a * b == 0)
592
+ {
593
+ return 0;
594
+ }
595
+ else if (a >= b)
596
+ {
597
+ return base_mask;
598
+ }
599
+ else return value_type((a * base_mask + (b >> 1)) / b);
600
+ }
601
+
602
+ //--------------------------------------------------------------------
603
+ template<typename T>
604
+ static AGG_INLINE T downscale(T a)
605
+ {
606
+ return a >> base_shift;
607
+ }
608
+
609
+ //--------------------------------------------------------------------
610
+ template<typename T>
611
+ static AGG_INLINE T downshift(T a, unsigned n)
612
+ {
613
+ return a >> n;
614
+ }
615
+
616
+ //--------------------------------------------------------------------
617
+ // Fixed-point multiply, almost exact over int16u.
618
+ // Specifically for multiplying a color component by a cover.
619
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
620
+ {
621
+ return multiply(a, b << 8 | b);
622
+ }
623
+
624
+ //--------------------------------------------------------------------
625
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
626
+ {
627
+ return mult_cover(b, a) >> 8;
628
+ }
629
+
630
+ //--------------------------------------------------------------------
631
+ // Interpolate p to q by a, assuming q is premultiplied by a.
632
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
633
+ {
634
+ return p + q - multiply(p, a);
635
+ }
636
+
637
+ //--------------------------------------------------------------------
638
+ // Interpolate p to q by a.
639
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
640
+ {
641
+ int t = (q - p) * a + base_MSB - (p > q);
642
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
643
+ }
644
+
645
+ //--------------------------------------------------------------------
646
+ self_type& clear()
647
+ {
648
+ v = a = 0;
649
+ return *this;
650
+ }
651
+
652
+ //--------------------------------------------------------------------
653
+ self_type& transparent()
654
+ {
655
+ a = 0;
656
+ return *this;
657
+ }
658
+
659
+ //--------------------------------------------------------------------
660
+ self_type& opacity(double a_)
661
+ {
662
+ if (a_ < 0) a = 0;
663
+ else if(a_ > 1) a = 1;
664
+ else a = (value_type)uround(a_ * double(base_mask));
665
+ return *this;
666
+ }
667
+
668
+ //--------------------------------------------------------------------
669
+ double opacity() const
670
+ {
671
+ return double(a) / double(base_mask);
672
+ }
673
+
674
+
675
+ //--------------------------------------------------------------------
676
+ self_type& premultiply()
677
+ {
678
+ if (a < base_mask)
679
+ {
680
+ if(a == 0) v = 0;
681
+ else v = multiply(v, a);
682
+ }
683
+ return *this;
684
+ }
685
+
686
+ //--------------------------------------------------------------------
687
+ self_type& demultiply()
688
+ {
689
+ if (a < base_mask)
690
+ {
691
+ if (a == 0)
692
+ {
693
+ v = 0;
694
+ }
695
+ else
696
+ {
697
+ calc_type v_ = (calc_type(v) * base_mask) / a;
698
+ v = (v_ > base_mask) ? value_type(base_mask) : value_type(v_);
699
+ }
700
+ }
701
+ return *this;
702
+ }
703
+
704
+ //--------------------------------------------------------------------
705
+ self_type gradient(self_type c, double k) const
706
+ {
707
+ self_type ret;
708
+ calc_type ik = uround(k * base_scale);
709
+ ret.v = lerp(v, c.v, ik);
710
+ ret.a = lerp(a, c.a, ik);
711
+ return ret;
712
+ }
713
+
714
+ //--------------------------------------------------------------------
715
+ AGG_INLINE void add(const self_type& c, unsigned cover)
716
+ {
717
+ calc_type cv, ca;
718
+ if (cover == cover_mask)
719
+ {
720
+ if (c.a == base_mask)
721
+ {
722
+ *this = c;
723
+ return;
724
+ }
725
+ else
726
+ {
727
+ cv = v + c.v;
728
+ ca = a + c.a;
729
+ }
730
+ }
731
+ else
732
+ {
733
+ cv = v + mult_cover(c.v, cover);
734
+ ca = a + mult_cover(c.a, cover);
735
+ }
736
+ v = (value_type)((cv > calc_type(base_mask)) ? calc_type(base_mask) : cv);
737
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
738
+ }
739
+
740
+ //--------------------------------------------------------------------
741
+ static self_type no_color() { return self_type(0,0); }
742
+ };
743
+
744
+
745
+ //===================================================================gray32
746
+ struct gray32
747
+ {
748
+ typedef float value_type;
749
+ typedef double calc_type;
750
+ typedef double long_type;
751
+ typedef gray32 self_type;
752
+
753
+ value_type v;
754
+ value_type a;
755
+
756
+ // Calculate grayscale value as per ITU-R BT.709.
757
+ static value_type luminance(double r, double g, double b)
758
+ {
759
+ return value_type(0.2126 * r + 0.7152 * g + 0.0722 * b);
760
+ }
761
+
762
+ static value_type luminance(const rgba& c)
763
+ {
764
+ return luminance(c.r, c.g, c.b);
765
+ }
766
+
767
+ static value_type luminance(const rgba32& c)
768
+ {
769
+ return luminance(c.r, c.g, c.b);
770
+ }
771
+
772
+ static value_type luminance(const rgba8& c)
773
+ {
774
+ return luminance(c.r / 255.0, c.g / 255.0, c.b / 255.0);
775
+ }
776
+
777
+ static value_type luminance(const rgba16& c)
778
+ {
779
+ return luminance(c.r / 65535.0, c.g / 65535.0, c.b / 65535.0);
780
+ }
781
+
782
+ //--------------------------------------------------------------------
783
+ gray32() {}
784
+
785
+ //--------------------------------------------------------------------
786
+ explicit gray32(value_type v_, value_type a_ = 1) :
787
+ v(v_), a(a_) {}
788
+
789
+ //--------------------------------------------------------------------
790
+ gray32(const self_type& c, value_type a_) :
791
+ v(c.v), a(a_) {}
792
+
793
+ //--------------------------------------------------------------------
794
+ gray32(const rgba& c) :
795
+ v(luminance(c)),
796
+ a(value_type(c.a)) {}
797
+
798
+ //--------------------------------------------------------------------
799
+ gray32(const rgba8& c) :
800
+ v(luminance(c)),
801
+ a(value_type(c.a / 255.0)) {}
802
+
803
+ //--------------------------------------------------------------------
804
+ gray32(const srgba8& c) :
805
+ v(luminance(rgba32(c))),
806
+ a(value_type(c.a / 255.0)) {}
807
+
808
+ //--------------------------------------------------------------------
809
+ gray32(const rgba16& c) :
810
+ v(luminance(c)),
811
+ a(value_type(c.a / 65535.0)) {}
812
+
813
+ //--------------------------------------------------------------------
814
+ gray32(const rgba32& c) :
815
+ v(luminance(c)),
816
+ a(value_type(c.a)) {}
817
+
818
+ //--------------------------------------------------------------------
819
+ gray32(const gray8& c) :
820
+ v(value_type(c.v / 255.0)),
821
+ a(value_type(c.a / 255.0)) {}
822
+
823
+ //--------------------------------------------------------------------
824
+ gray32(const sgray8& c) :
825
+ v(sRGB_conv<value_type>::rgb_from_sRGB(c.v)),
826
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
827
+
828
+ //--------------------------------------------------------------------
829
+ gray32(const gray16& c) :
830
+ v(value_type(c.v / 65535.0)),
831
+ a(value_type(c.a / 65535.0)) {}
832
+
833
+ //--------------------------------------------------------------------
834
+ operator rgba() const
835
+ {
836
+ return rgba(v, v, v, a);
837
+ }
838
+
839
+ //--------------------------------------------------------------------
840
+ operator gray8() const
841
+ {
842
+ return gray8(uround(v * 255.0), uround(a * 255.0));
843
+ }
844
+
845
+ //--------------------------------------------------------------------
846
+ operator sgray8() const
847
+ {
848
+ // Return (non-premultiplied) sRGB values.
849
+ return sgray8(
850
+ sRGB_conv<value_type>::rgb_to_sRGB(v),
851
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
852
+ }
853
+
854
+ //--------------------------------------------------------------------
855
+ operator gray16() const
856
+ {
857
+ return gray16(uround(v * 65535.0), uround(a * 65535.0));
858
+ }
859
+
860
+ //--------------------------------------------------------------------
861
+ operator rgba8() const
862
+ {
863
+ rgba8::value_type y = uround(v * 255.0);
864
+ return rgba8(y, y, y, uround(a * 255.0));
865
+ }
866
+
867
+ //--------------------------------------------------------------------
868
+ operator srgba8() const
869
+ {
870
+ srgba8::value_type y = sRGB_conv<value_type>::rgb_to_sRGB(v);
871
+ return srgba8(y, y, y, sRGB_conv<value_type>::alpha_to_sRGB(a));
872
+ }
873
+
874
+ //--------------------------------------------------------------------
875
+ operator rgba16() const
876
+ {
877
+ rgba16::value_type y = uround(v * 65535.0);
878
+ return rgba16(y, y, y, uround(a * 65535.0));
879
+ }
880
+
881
+ //--------------------------------------------------------------------
882
+ operator rgba32() const
883
+ {
884
+ return rgba32(v, v, v, a);
885
+ }
886
+
887
+ //--------------------------------------------------------------------
888
+ static AGG_INLINE double to_double(value_type a)
889
+ {
890
+ return a;
891
+ }
892
+
893
+ //--------------------------------------------------------------------
894
+ static AGG_INLINE value_type from_double(double a)
895
+ {
896
+ return value_type(a);
897
+ }
898
+
899
+ //--------------------------------------------------------------------
900
+ static AGG_INLINE value_type empty_value()
901
+ {
902
+ return 0;
903
+ }
904
+
905
+ //--------------------------------------------------------------------
906
+ static AGG_INLINE value_type full_value()
907
+ {
908
+ return 1;
909
+ }
910
+
911
+ //--------------------------------------------------------------------
912
+ AGG_INLINE bool is_transparent() const
913
+ {
914
+ return a <= 0;
915
+ }
916
+
917
+ //--------------------------------------------------------------------
918
+ AGG_INLINE bool is_opaque() const
919
+ {
920
+ return a >= 1;
921
+ }
922
+
923
+ //--------------------------------------------------------------------
924
+ static AGG_INLINE value_type invert(value_type x)
925
+ {
926
+ return 1 - x;
927
+ }
928
+
929
+ //--------------------------------------------------------------------
930
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
931
+ {
932
+ return value_type(a * b);
933
+ }
934
+
935
+ //--------------------------------------------------------------------
936
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
937
+ {
938
+ return (b == 0) ? 0 : value_type(a / b);
939
+ }
940
+
941
+ //--------------------------------------------------------------------
942
+ template<typename T>
943
+ static AGG_INLINE T downscale(T a)
944
+ {
945
+ return a;
946
+ }
947
+
948
+ //--------------------------------------------------------------------
949
+ template<typename T>
950
+ static AGG_INLINE T downshift(T a, unsigned n)
951
+ {
952
+ return n > 0 ? a / (1 << n) : a;
953
+ }
954
+
955
+ //--------------------------------------------------------------------
956
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
957
+ {
958
+ return value_type(a * b / cover_mask);
959
+ }
960
+
961
+ //--------------------------------------------------------------------
962
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
963
+ {
964
+ return cover_type(uround(a * b));
965
+ }
966
+
967
+ //--------------------------------------------------------------------
968
+ // Interpolate p to q by a, assuming q is premultiplied by a.
969
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
970
+ {
971
+ return (1 - a) * p + q; // more accurate than "p + q - p * a"
972
+ }
973
+
974
+ //--------------------------------------------------------------------
975
+ // Interpolate p to q by a.
976
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
977
+ {
978
+ // The form "p + a * (q - p)" avoids a multiplication, but may produce an
979
+ // inaccurate result. For example, "p + (q - p)" may not be exactly equal
980
+ // to q. Therefore, stick to the basic expression, which at least produces
981
+ // the correct result at either extreme.
982
+ return (1 - a) * p + a * q;
983
+ }
984
+
985
+ //--------------------------------------------------------------------
986
+ self_type& clear()
987
+ {
988
+ v = a = 0;
989
+ return *this;
990
+ }
991
+
992
+ //--------------------------------------------------------------------
993
+ self_type& transparent()
994
+ {
995
+ a = 0;
996
+ return *this;
997
+ }
998
+
999
+ //--------------------------------------------------------------------
1000
+ self_type& opacity(double a_)
1001
+ {
1002
+ if (a_ < 0) a = 0;
1003
+ else if (a_ > 1) a = 1;
1004
+ else a = value_type(a_);
1005
+ return *this;
1006
+ }
1007
+
1008
+ //--------------------------------------------------------------------
1009
+ double opacity() const
1010
+ {
1011
+ return a;
1012
+ }
1013
+
1014
+
1015
+ //--------------------------------------------------------------------
1016
+ self_type& premultiply()
1017
+ {
1018
+ if (a < 0) v = 0;
1019
+ else if(a < 1) v *= a;
1020
+ return *this;
1021
+ }
1022
+
1023
+ //--------------------------------------------------------------------
1024
+ self_type& demultiply()
1025
+ {
1026
+ if (a < 0) v = 0;
1027
+ else if (a < 1) v /= a;
1028
+ return *this;
1029
+ }
1030
+
1031
+ //--------------------------------------------------------------------
1032
+ self_type gradient(self_type c, double k) const
1033
+ {
1034
+ return self_type(
1035
+ value_type(v + (c.v - v) * k),
1036
+ value_type(a + (c.a - a) * k));
1037
+ }
1038
+
1039
+ //--------------------------------------------------------------------
1040
+ static self_type no_color() { return self_type(0,0); }
1041
+ };
1042
+ }
1043
+
1044
+
1045
+
1046
+
1047
+ #endif
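
A brief sketch of the grayscale types above (an editorial illustration, not part of the upstream diff): constructing a gray8 from an rgba8 goes through the BT.709 luminance weights, and gradient() interpolates both the value and the alpha channel; the colors are arbitrary example values.

    #include <cstdio>
    #include "agg_color_gray.h"

    void gray_demo()
    {
        agg::rgba8 orange(255, 128, 0, 255);      // linear 8-bit RGBA
        agg::gray8 g(orange);                     // BT.709 luminance, alpha copied
        agg::gray8 halfway = g.gradient(agg::gray8(0, 255), 0.5); // 50% towards black

        std::printf("gray v=%u a=%u, halfway v=%u\n",
                    unsigned(g.v), unsigned(g.a), unsigned(halfway.v));
    }
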
data/bundled_deps/agg/agg/agg_color_rgba.h ADDED
@@ -0,0 +1,1353 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ //
12
+ // Adaptation for high precision colors has been sponsored by
13
+ // Liberty Technology Systems, Inc., visit http://lib-sys.com
14
+ //
15
+ // Liberty Technology Systems, Inc. is the provider of
16
+ // PostScript and PDF technology for software developers.
17
+ //
18
+ //----------------------------------------------------------------------------
19
+ // Contact: [email protected]
20
21
+ // http://www.antigrain.com
22
+ //----------------------------------------------------------------------------
23
+
24
+ #ifndef AGG_COLOR_RGBA_INCLUDED
25
+ #define AGG_COLOR_RGBA_INCLUDED
26
+
27
+ #include <math.h>
28
+ #include "agg_basics.h"
29
+ #include "agg_gamma_lut.h"
30
+
31
+ namespace agg
32
+ {
33
+ // Supported component orders for RGB and RGBA pixel formats
34
+ //=======================================================================
35
+ struct order_rgb { enum rgb_e { R=0, G=1, B=2, N=3 }; };
36
+ struct order_bgr { enum bgr_e { B=0, G=1, R=2, N=3 }; };
37
+ struct order_rgba { enum rgba_e { R=0, G=1, B=2, A=3, N=4 }; };
38
+ struct order_argb { enum argb_e { A=0, R=1, G=2, B=3, N=4 }; };
39
+ struct order_abgr { enum abgr_e { A=0, B=1, G=2, R=3, N=4 }; };
40
+ struct order_bgra { enum bgra_e { B=0, G=1, R=2, A=3, N=4 }; };
41
+
42
+ // Colorspace tag types.
43
+ struct linear {};
44
+ struct sRGB {};
45
+
46
+ //====================================================================rgba
47
+ struct rgba
48
+ {
49
+ typedef double value_type;
50
+
51
+ double r;
52
+ double g;
53
+ double b;
54
+ double a;
55
+
56
+ //--------------------------------------------------------------------
57
+ rgba() {}
58
+
59
+ //--------------------------------------------------------------------
60
+ rgba(double r_, double g_, double b_, double a_=1.0) :
61
+ r(r_), g(g_), b(b_), a(a_) {}
62
+
63
+ //--------------------------------------------------------------------
64
+ rgba(const rgba& c, double a_) : r(c.r), g(c.g), b(c.b), a(a_) {}
65
+
66
+ //--------------------------------------------------------------------
67
+ rgba& clear()
68
+ {
69
+ r = g = b = a = 0;
70
+ return *this;
71
+ }
72
+
73
+ //--------------------------------------------------------------------
74
+ rgba& transparent()
75
+ {
76
+ a = 0;
77
+ return *this;
78
+ }
79
+
80
+ //--------------------------------------------------------------------
81
+ rgba& opacity(double a_)
82
+ {
83
+ if (a_ < 0) a = 0;
84
+ else if (a_ > 1) a = 1;
85
+ else a = a_;
86
+ return *this;
87
+ }
88
+
89
+ //--------------------------------------------------------------------
90
+ double opacity() const
91
+ {
92
+ return a;
93
+ }
94
+
95
+ //--------------------------------------------------------------------
96
+ rgba& premultiply()
97
+ {
98
+ r *= a;
99
+ g *= a;
100
+ b *= a;
101
+ return *this;
102
+ }
103
+
104
+ //--------------------------------------------------------------------
105
+ rgba& premultiply(double a_)
106
+ {
107
+ if (a <= 0 || a_ <= 0)
108
+ {
109
+ r = g = b = a = 0;
110
+ }
111
+ else
112
+ {
113
+ a_ /= a;
114
+ r *= a_;
115
+ g *= a_;
116
+ b *= a_;
117
+ a = a_;
118
+ }
119
+ return *this;
120
+ }
121
+
122
+ //--------------------------------------------------------------------
123
+ rgba& demultiply()
124
+ {
125
+ if (a == 0)
126
+ {
127
+ r = g = b = 0;
128
+ }
129
+ else
130
+ {
131
+ double a_ = 1.0 / a;
132
+ r *= a_;
133
+ g *= a_;
134
+ b *= a_;
135
+ }
136
+ return *this;
137
+ }
138
+
139
+
140
+ //--------------------------------------------------------------------
141
+ rgba gradient(rgba c, double k) const
142
+ {
143
+ rgba ret;
144
+ ret.r = r + (c.r - r) * k;
145
+ ret.g = g + (c.g - g) * k;
146
+ ret.b = b + (c.b - b) * k;
147
+ ret.a = a + (c.a - a) * k;
148
+ return ret;
149
+ }
150
+
151
+ rgba& operator+=(const rgba& c)
152
+ {
153
+ r += c.r;
154
+ g += c.g;
155
+ b += c.b;
156
+ a += c.a;
157
+ return *this;
158
+ }
159
+
160
+ rgba& operator*=(double k)
161
+ {
162
+ r *= k;
163
+ g *= k;
164
+ b *= k;
165
+ a *= k;
166
+ return *this;
167
+ }
168
+
169
+ //--------------------------------------------------------------------
170
+ static rgba no_color() { return rgba(0,0,0,0); }
171
+
172
+ //--------------------------------------------------------------------
173
+ static rgba from_wavelength(double wl, double gamma = 1.0);
174
+
175
+ //--------------------------------------------------------------------
176
+ explicit rgba(double wavelen, double gamma=1.0)
177
+ {
178
+ *this = from_wavelength(wavelen, gamma);
179
+ }
180
+
181
+ };
182
+
183
+ inline rgba operator+(const rgba& a, const rgba& b)
184
+ {
185
+ return rgba(a) += b;
186
+ }
187
+
188
+ inline rgba operator*(const rgba& a, double b)
189
+ {
190
+ return rgba(a) *= b;
191
+ }
192
+
193
+ //------------------------------------------------------------------------
194
+ inline rgba rgba::from_wavelength(double wl, double gamma)
195
+ {
196
+ rgba t(0.0, 0.0, 0.0);
197
+
198
+ if (wl >= 380.0 && wl <= 440.0)
199
+ {
200
+ t.r = -1.0 * (wl - 440.0) / (440.0 - 380.0);
201
+ t.b = 1.0;
202
+ }
203
+ else if (wl >= 440.0 && wl <= 490.0)
204
+ {
205
+ t.g = (wl - 440.0) / (490.0 - 440.0);
206
+ t.b = 1.0;
207
+ }
208
+ else if (wl >= 490.0 && wl <= 510.0)
209
+ {
210
+ t.g = 1.0;
211
+ t.b = -1.0 * (wl - 510.0) / (510.0 - 490.0);
212
+ }
213
+ else if (wl >= 510.0 && wl <= 580.0)
214
+ {
215
+ t.r = (wl - 510.0) / (580.0 - 510.0);
216
+ t.g = 1.0;
217
+ }
218
+ else if (wl >= 580.0 && wl <= 645.0)
219
+ {
220
+ t.r = 1.0;
221
+ t.g = -1.0 * (wl - 645.0) / (645.0 - 580.0);
222
+ }
223
+ else if (wl >= 645.0 && wl <= 780.0)
224
+ {
225
+ t.r = 1.0;
226
+ }
227
+
228
+ double s = 1.0;
229
+ if (wl > 700.0) s = 0.3 + 0.7 * (780.0 - wl) / (780.0 - 700.0);
230
+ else if (wl < 420.0) s = 0.3 + 0.7 * (wl - 380.0) / (420.0 - 380.0);
231
+
232
+ t.r = pow(t.r * s, gamma);
233
+ t.g = pow(t.g * s, gamma);
234
+ t.b = pow(t.b * s, gamma);
235
+ return t;
236
+ }
237
+
238
+ inline rgba rgba_pre(double r, double g, double b, double a)
239
+ {
240
+ return rgba(r, g, b, a).premultiply();
241
+ }
242
+
243
+
244
+ //===================================================================rgba8
245
+ template<class Colorspace>
246
+ struct rgba8T
247
+ {
248
+ typedef int8u value_type;
249
+ typedef int32u calc_type;
250
+ typedef int32 long_type;
251
+ enum base_scale_e
252
+ {
253
+ base_shift = 8,
254
+ base_scale = 1 << base_shift,
255
+ base_mask = base_scale - 1,
256
+ base_MSB = 1 << (base_shift - 1)
257
+ };
258
+ typedef rgba8T self_type;
259
+
260
+
261
+ value_type r;
262
+ value_type g;
263
+ value_type b;
264
+ value_type a;
265
+
266
+ static void convert(rgba8T<linear>& dst, const rgba8T<sRGB>& src)
267
+ {
268
+ dst.r = sRGB_conv<value_type>::rgb_from_sRGB(src.r);
269
+ dst.g = sRGB_conv<value_type>::rgb_from_sRGB(src.g);
270
+ dst.b = sRGB_conv<value_type>::rgb_from_sRGB(src.b);
271
+ dst.a = src.a;
272
+ }
273
+
274
+ static void convert(rgba8T<sRGB>& dst, const rgba8T<linear>& src)
275
+ {
276
+ dst.r = sRGB_conv<value_type>::rgb_to_sRGB(src.r);
277
+ dst.g = sRGB_conv<value_type>::rgb_to_sRGB(src.g);
278
+ dst.b = sRGB_conv<value_type>::rgb_to_sRGB(src.b);
279
+ dst.a = src.a;
280
+ }
281
+
282
+ static void convert(rgba8T<linear>& dst, const rgba& src)
283
+ {
284
+ dst.r = value_type(uround(src.r * base_mask));
285
+ dst.g = value_type(uround(src.g * base_mask));
286
+ dst.b = value_type(uround(src.b * base_mask));
287
+ dst.a = value_type(uround(src.a * base_mask));
288
+ }
289
+
290
+ static void convert(rgba8T<sRGB>& dst, const rgba& src)
291
+ {
292
+ // Use the "float" table.
293
+ dst.r = sRGB_conv<float>::rgb_to_sRGB(float(src.r));
294
+ dst.g = sRGB_conv<float>::rgb_to_sRGB(float(src.g));
295
+ dst.b = sRGB_conv<float>::rgb_to_sRGB(float(src.b));
296
+ dst.a = sRGB_conv<float>::alpha_to_sRGB(float(src.a));
297
+ }
298
+
299
+ static void convert(rgba& dst, const rgba8T<linear>& src)
300
+ {
301
+ dst.r = src.r / 255.0;
302
+ dst.g = src.g / 255.0;
303
+ dst.b = src.b / 255.0;
304
+ dst.a = src.a / 255.0;
305
+ }
306
+
307
+ static void convert(rgba& dst, const rgba8T<sRGB>& src)
308
+ {
309
+ // Use the "float" table.
310
+ dst.r = sRGB_conv<float>::rgb_from_sRGB(src.r);
311
+ dst.g = sRGB_conv<float>::rgb_from_sRGB(src.g);
312
+ dst.b = sRGB_conv<float>::rgb_from_sRGB(src.b);
313
+ dst.a = sRGB_conv<float>::alpha_from_sRGB(src.a);
314
+ }
315
+
316
+ //--------------------------------------------------------------------
317
+ rgba8T() {}
318
+
319
+ //--------------------------------------------------------------------
320
+ rgba8T(unsigned r_, unsigned g_, unsigned b_, unsigned a_ = base_mask) :
321
+ r(value_type(r_)),
322
+ g(value_type(g_)),
323
+ b(value_type(b_)),
324
+ a(value_type(a_)) {}
325
+
326
+ //--------------------------------------------------------------------
327
+ rgba8T(const rgba& c)
328
+ {
329
+ convert(*this, c);
330
+ }
331
+
332
+ //--------------------------------------------------------------------
333
+ rgba8T(const self_type& c, unsigned a_) :
334
+ r(c.r), g(c.g), b(c.b), a(value_type(a_)) {}
335
+
336
+ //--------------------------------------------------------------------
337
+ template<class T>
338
+ rgba8T(const rgba8T<T>& c)
339
+ {
340
+ convert(*this, c);
341
+ }
342
+
343
+ //--------------------------------------------------------------------
344
+ operator rgba() const
345
+ {
346
+ rgba c;
347
+ convert(c, *this);
348
+ return c;
349
+ }
350
+
351
+ //--------------------------------------------------------------------
352
+ static AGG_INLINE double to_double(value_type a)
353
+ {
354
+ return double(a) / base_mask;
355
+ }
356
+
357
+ //--------------------------------------------------------------------
358
+ static AGG_INLINE value_type from_double(double a)
359
+ {
360
+ return value_type(uround(a * base_mask));
361
+ }
362
+
363
+ //--------------------------------------------------------------------
364
+ static AGG_INLINE value_type empty_value()
365
+ {
366
+ return 0;
367
+ }
368
+
369
+ //--------------------------------------------------------------------
370
+ static AGG_INLINE value_type full_value()
371
+ {
372
+ return base_mask;
373
+ }
374
+
375
+ //--------------------------------------------------------------------
376
+ AGG_INLINE bool is_transparent() const
377
+ {
378
+ return a == 0;
379
+ }
380
+
381
+ //--------------------------------------------------------------------
382
+ AGG_INLINE bool is_opaque() const
383
+ {
384
+ return a == base_mask;
385
+ }
386
+
387
+ //--------------------------------------------------------------------
388
+ static AGG_INLINE value_type invert(value_type x)
389
+ {
390
+ return base_mask - x;
391
+ }
392
+
393
+ //--------------------------------------------------------------------
394
+ // Fixed-point multiply, exact over int8u.
395
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
396
+ {
397
+ calc_type t = a * b + base_MSB;
398
+ return value_type(((t >> base_shift) + t) >> base_shift);
399
+ }
400
+
401
+ //--------------------------------------------------------------------
402
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
403
+ {
404
+ if (a * b == 0)
405
+ {
406
+ return 0;
407
+ }
408
+ else if (a >= b)
409
+ {
410
+ return base_mask;
411
+ }
412
+ else return value_type((a * base_mask + (b >> 1)) / b);
413
+ }
414
+
415
+ //--------------------------------------------------------------------
416
+ template<typename T>
417
+ static AGG_INLINE T downscale(T a)
418
+ {
419
+ return a >> base_shift;
420
+ }
421
+
422
+ //--------------------------------------------------------------------
423
+ template<typename T>
424
+ static AGG_INLINE T downshift(T a, unsigned n)
425
+ {
426
+ return a >> n;
427
+ }
428
+
429
+ //--------------------------------------------------------------------
430
+ // Fixed-point multiply, exact over int8u.
431
+ // Specifically for multiplying a color component by a cover.
432
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
433
+ {
434
+ return multiply(a, b);
435
+ }
436
+
437
+ //--------------------------------------------------------------------
438
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
439
+ {
440
+ return multiply(b, a);
441
+ }
442
+
443
+ //--------------------------------------------------------------------
444
+ // Interpolate p to q by a, assuming q is premultiplied by a.
445
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
446
+ {
447
+ return p + q - multiply(p, a);
448
+ }
449
+
450
+ //--------------------------------------------------------------------
451
+ // Interpolate p to q by a.
452
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
453
+ {
454
+ int t = (q - p) * a + base_MSB - (p > q);
455
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
456
+ }
457
+
458
+ //--------------------------------------------------------------------
459
+ self_type& clear()
460
+ {
461
+ r = g = b = a = 0;
462
+ return *this;
463
+ }
464
+
465
+ //--------------------------------------------------------------------
466
+ self_type& transparent()
467
+ {
468
+ a = 0;
469
+ return *this;
470
+ }
471
+
472
+ //--------------------------------------------------------------------
473
+ self_type& opacity(double a_)
474
+ {
475
+ if (a_ < 0) a = 0;
476
+ else if (a_ > 1) a = 1;
477
+ else a = (value_type)uround(a_ * double(base_mask));
478
+ return *this;
479
+ }
480
+
481
+ //--------------------------------------------------------------------
482
+ double opacity() const
483
+ {
484
+ return double(a) / double(base_mask);
485
+ }
486
+
487
+ //--------------------------------------------------------------------
488
+ AGG_INLINE self_type& premultiply()
489
+ {
490
+ if (a != base_mask)
491
+ {
492
+ if (a == 0)
493
+ {
494
+ r = g = b = 0;
495
+ }
496
+ else
497
+ {
498
+ r = multiply(r, a);
499
+ g = multiply(g, a);
500
+ b = multiply(b, a);
501
+ }
502
+ }
503
+ return *this;
504
+ }
505
+
506
+ //--------------------------------------------------------------------
507
+ AGG_INLINE self_type& premultiply(unsigned a_)
508
+ {
509
+ if (a != base_mask || a_ < base_mask)
510
+ {
511
+ if (a == 0 || a_ == 0)
512
+ {
513
+ r = g = b = a = 0;
514
+ }
515
+ else
516
+ {
517
+ calc_type r_ = (calc_type(r) * a_) / a;
518
+ calc_type g_ = (calc_type(g) * a_) / a;
519
+ calc_type b_ = (calc_type(b) * a_) / a;
520
+ r = value_type((r_ > a_) ? a_ : r_);
521
+ g = value_type((g_ > a_) ? a_ : g_);
522
+ b = value_type((b_ > a_) ? a_ : b_);
523
+ a = value_type(a_);
524
+ }
525
+ }
526
+ return *this;
527
+ }
528
+
529
+ //--------------------------------------------------------------------
530
+ AGG_INLINE self_type& demultiply()
531
+ {
532
+ if (a < base_mask)
533
+ {
534
+ if (a == 0)
535
+ {
536
+ r = g = b = 0;
537
+ }
538
+ else
539
+ {
540
+ calc_type r_ = (calc_type(r) * base_mask) / a;
541
+ calc_type g_ = (calc_type(g) * base_mask) / a;
542
+ calc_type b_ = (calc_type(b) * base_mask) / a;
543
+ r = value_type((r_ > calc_type(base_mask)) ? calc_type(base_mask) : r_);
544
+ g = value_type((g_ > calc_type(base_mask)) ? calc_type(base_mask) : g_);
545
+ b = value_type((b_ > calc_type(base_mask)) ? calc_type(base_mask) : b_);
546
+ }
547
+ }
548
+ return *this;
549
+ }
550
+
551
+ //--------------------------------------------------------------------
552
+ AGG_INLINE self_type gradient(const self_type& c, double k) const
553
+ {
554
+ self_type ret;
555
+ calc_type ik = uround(k * base_mask);
556
+ ret.r = lerp(r, c.r, ik);
557
+ ret.g = lerp(g, c.g, ik);
558
+ ret.b = lerp(b, c.b, ik);
559
+ ret.a = lerp(a, c.a, ik);
560
+ return ret;
561
+ }
562
+
563
+ //--------------------------------------------------------------------
564
+ AGG_INLINE void add(const self_type& c, unsigned cover)
565
+ {
566
+ calc_type cr, cg, cb, ca;
567
+ if (cover == cover_mask)
568
+ {
569
+ if (c.a == base_mask)
570
+ {
571
+ *this = c;
572
+ return;
573
+ }
574
+ else
575
+ {
576
+ cr = r + c.r;
577
+ cg = g + c.g;
578
+ cb = b + c.b;
579
+ ca = a + c.a;
580
+ }
581
+ }
582
+ else
583
+ {
584
+ cr = r + mult_cover(c.r, cover);
585
+ cg = g + mult_cover(c.g, cover);
586
+ cb = b + mult_cover(c.b, cover);
587
+ ca = a + mult_cover(c.a, cover);
588
+ }
589
+ r = (value_type)((cr > calc_type(base_mask)) ? calc_type(base_mask) : cr);
590
+ g = (value_type)((cg > calc_type(base_mask)) ? calc_type(base_mask) : cg);
591
+ b = (value_type)((cb > calc_type(base_mask)) ? calc_type(base_mask) : cb);
592
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
593
+ }
594
+
595
+ //--------------------------------------------------------------------
596
+ template<class GammaLUT>
597
+ AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
598
+ {
599
+ r = gamma.dir(r);
600
+ g = gamma.dir(g);
601
+ b = gamma.dir(b);
602
+ }
603
+
604
+ //--------------------------------------------------------------------
605
+ template<class GammaLUT>
606
+ AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
607
+ {
608
+ r = gamma.inv(r);
609
+ g = gamma.inv(g);
610
+ b = gamma.inv(b);
611
+ }
612
+
613
+ //--------------------------------------------------------------------
614
+ static self_type no_color() { return self_type(0,0,0,0); }
615
+
616
+ //--------------------------------------------------------------------
617
+ static self_type from_wavelength(double wl, double gamma = 1.0)
618
+ {
619
+ return self_type(rgba::from_wavelength(wl, gamma));
620
+ }
621
+ };
622
+
623
+ typedef rgba8T<linear> rgba8;
624
+ typedef rgba8T<sRGB> srgba8;
625
+
626
+
627
+ //-------------------------------------------------------------rgb8_packed
628
+ inline rgba8 rgb8_packed(unsigned v)
629
+ {
630
+ return rgba8((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF);
631
+ }
632
+
633
+ //-------------------------------------------------------------bgr8_packed
634
+ inline rgba8 bgr8_packed(unsigned v)
635
+ {
636
+ return rgba8(v & 0xFF, (v >> 8) & 0xFF, (v >> 16) & 0xFF);
637
+ }
638
+
639
+ //------------------------------------------------------------argb8_packed
640
+ inline rgba8 argb8_packed(unsigned v)
641
+ {
642
+ return rgba8((v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF, v >> 24);
643
+ }
644
+
645
+ //---------------------------------------------------------rgba8_gamma_dir
646
+ template<class GammaLUT>
647
+ rgba8 rgba8_gamma_dir(rgba8 c, const GammaLUT& gamma)
648
+ {
649
+ return rgba8(gamma.dir(c.r), gamma.dir(c.g), gamma.dir(c.b), c.a);
650
+ }
651
+
652
+ //---------------------------------------------------------rgba8_gamma_inv
653
+ template<class GammaLUT>
654
+ rgba8 rgba8_gamma_inv(rgba8 c, const GammaLUT& gamma)
655
+ {
656
+ return rgba8(gamma.inv(c.r), gamma.inv(c.g), gamma.inv(c.b), c.a);
657
+ }
658
+
659
+
660
+
661
+ //==================================================================rgba16
662
+ struct rgba16
663
+ {
664
+ typedef int16u value_type;
665
+ typedef int32u calc_type;
666
+ typedef int64 long_type;
667
+ enum base_scale_e
668
+ {
669
+ base_shift = 16,
670
+ base_scale = 1 << base_shift,
671
+ base_mask = base_scale - 1,
672
+ base_MSB = 1 << (base_shift - 1)
673
+ };
674
+ typedef rgba16 self_type;
675
+
676
+ value_type r;
677
+ value_type g;
678
+ value_type b;
679
+ value_type a;
680
+
681
+ //--------------------------------------------------------------------
682
+ rgba16() {}
683
+
684
+ //--------------------------------------------------------------------
685
+ rgba16(unsigned r_, unsigned g_, unsigned b_, unsigned a_=base_mask) :
686
+ r(value_type(r_)),
687
+ g(value_type(g_)),
688
+ b(value_type(b_)),
689
+ a(value_type(a_)) {}
690
+
691
+ //--------------------------------------------------------------------
692
+ rgba16(const self_type& c, unsigned a_) :
693
+ r(c.r), g(c.g), b(c.b), a(value_type(a_)) {}
694
+
695
+ //--------------------------------------------------------------------
696
+ rgba16(const rgba& c) :
697
+ r((value_type)uround(c.r * double(base_mask))),
698
+ g((value_type)uround(c.g * double(base_mask))),
699
+ b((value_type)uround(c.b * double(base_mask))),
700
+ a((value_type)uround(c.a * double(base_mask))) {}
701
+
702
+ //--------------------------------------------------------------------
703
+ rgba16(const rgba8& c) :
704
+ r(value_type((value_type(c.r) << 8) | c.r)),
705
+ g(value_type((value_type(c.g) << 8) | c.g)),
706
+ b(value_type((value_type(c.b) << 8) | c.b)),
707
+ a(value_type((value_type(c.a) << 8) | c.a)) {}
708
+
709
+ //--------------------------------------------------------------------
710
+ rgba16(const srgba8& c) :
711
+ r(sRGB_conv<value_type>::rgb_from_sRGB(c.r)),
712
+ g(sRGB_conv<value_type>::rgb_from_sRGB(c.g)),
713
+ b(sRGB_conv<value_type>::rgb_from_sRGB(c.b)),
714
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
715
+
716
+ //--------------------------------------------------------------------
717
+ operator rgba() const
718
+ {
719
+ return rgba(
720
+ r / 65535.0,
721
+ g / 65535.0,
722
+ b / 65535.0,
723
+ a / 65535.0);
724
+ }
725
+
726
+ //--------------------------------------------------------------------
727
+ operator rgba8() const
728
+ {
729
+ return rgba8(r >> 8, g >> 8, b >> 8, a >> 8);
730
+ }
731
+
732
+ //--------------------------------------------------------------------
733
+ operator srgba8() const
734
+ {
735
+ // Return (non-premultiplied) sRGB values.
736
+ return srgba8(
737
+ sRGB_conv<value_type>::rgb_to_sRGB(r),
738
+ sRGB_conv<value_type>::rgb_to_sRGB(g),
739
+ sRGB_conv<value_type>::rgb_to_sRGB(b),
740
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
741
+ }
742
+
743
+ //--------------------------------------------------------------------
744
+ static AGG_INLINE double to_double(value_type a)
745
+ {
746
+ return double(a) / base_mask;
747
+ }
748
+
749
+ //--------------------------------------------------------------------
750
+ static AGG_INLINE value_type from_double(double a)
751
+ {
752
+ return value_type(uround(a * base_mask));
753
+ }
754
+
755
+ //--------------------------------------------------------------------
756
+ static AGG_INLINE value_type empty_value()
757
+ {
758
+ return 0;
759
+ }
760
+
761
+ //--------------------------------------------------------------------
762
+ static AGG_INLINE value_type full_value()
763
+ {
764
+ return base_mask;
765
+ }
766
+
767
+ //--------------------------------------------------------------------
768
+ AGG_INLINE bool is_transparent() const
769
+ {
770
+ return a == 0;
771
+ }
772
+
773
+ //--------------------------------------------------------------------
774
+ AGG_INLINE bool is_opaque() const
775
+ {
776
+ return a == base_mask;
777
+ }
778
+
779
+ //--------------------------------------------------------------------
780
+ static AGG_INLINE value_type invert(value_type x)
781
+ {
782
+ return base_mask - x;
783
+ }
784
+
785
+ //--------------------------------------------------------------------
786
+ // Fixed-point multiply, exact over int16u.
787
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
788
+ {
789
+ calc_type t = a * b + base_MSB;
790
+ return value_type(((t >> base_shift) + t) >> base_shift);
791
+ }
792
+
793
+ //--------------------------------------------------------------------
794
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
795
+ {
796
+ if (a * b == 0)
797
+ {
798
+ return 0;
799
+ }
800
+ else if (a >= b)
801
+ {
802
+ return base_mask;
803
+ }
804
+ else return value_type((a * base_mask + (b >> 1)) / b);
805
+ }
806
+
807
+ //--------------------------------------------------------------------
808
+ template<typename T>
809
+ static AGG_INLINE T downscale(T a)
810
+ {
811
+ return a >> base_shift;
812
+ }
813
+
814
+ //--------------------------------------------------------------------
815
+ template<typename T>
816
+ static AGG_INLINE T downshift(T a, unsigned n)
817
+ {
818
+ return a >> n;
819
+ }
820
+
821
+ //--------------------------------------------------------------------
822
+ // Fixed-point multiply, almost exact over int16u.
823
+ // Specifically for multiplying a color component by a cover.
824
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
825
+ {
826
+ return multiply(a, (b << 8) | b);
827
+ }
828
+
829
+ //--------------------------------------------------------------------
830
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
831
+ {
832
+ return multiply((a << 8) | a, b) >> 8;
833
+ }
834
+
835
+ //--------------------------------------------------------------------
836
+ // Interpolate p to q by a, assuming q is premultiplied by a.
837
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
838
+ {
839
+ return p + q - multiply(p, a);
840
+ }
841
+
842
+ //--------------------------------------------------------------------
843
+ // Interpolate p to q by a.
844
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
845
+ {
846
+ int t = (q - p) * a + base_MSB - (p > q);
847
+ return value_type(p + (((t >> base_shift) + t) >> base_shift));
848
+ }
849
+
850
+ //--------------------------------------------------------------------
851
+ self_type& clear()
852
+ {
853
+ r = g = b = a = 0;
854
+ return *this;
855
+ }
856
+
857
+ //--------------------------------------------------------------------
858
+ self_type& transparent()
859
+ {
860
+ a = 0;
861
+ return *this;
862
+ }
863
+
864
+ //--------------------------------------------------------------------
865
+ AGG_INLINE self_type& opacity(double a_)
866
+ {
867
+ if (a_ < 0) a = 0;
868
+ else if (a_ > 1) a = 1;
869
+ else a = value_type(uround(a_ * double(base_mask)));
870
+ return *this;
871
+ }
872
+
873
+ //--------------------------------------------------------------------
874
+ double opacity() const
875
+ {
876
+ return double(a) / double(base_mask);
877
+ }
878
+
879
+ //--------------------------------------------------------------------
880
+ AGG_INLINE self_type& premultiply()
881
+ {
882
+ if (a != base_mask)
883
+ {
884
+ if (a == 0)
885
+ {
886
+ r = g = b = 0;
887
+ }
888
+ else
889
+ {
890
+ r = multiply(r, a);
891
+ g = multiply(g, a);
892
+ b = multiply(b, a);
893
+ }
894
+ }
895
+ return *this;
896
+ }
897
+
898
+ //--------------------------------------------------------------------
899
+ AGG_INLINE self_type& premultiply(unsigned a_)
900
+ {
901
+ if (a < base_mask || a_ < base_mask)
902
+ {
903
+ if (a == 0 || a_ == 0)
904
+ {
905
+ r = g = b = a = 0;
906
+ }
907
+ else
908
+ {
909
+ calc_type r_ = (calc_type(r) * a_) / a;
910
+ calc_type g_ = (calc_type(g) * a_) / a;
911
+ calc_type b_ = (calc_type(b) * a_) / a;
912
+ r = value_type((r_ > a_) ? a_ : r_);
913
+ g = value_type((g_ > a_) ? a_ : g_);
914
+ b = value_type((b_ > a_) ? a_ : b_);
915
+ a = value_type(a_);
916
+ }
917
+ }
918
+ return *this;
919
+ }
920
+
921
+ //--------------------------------------------------------------------
922
+ AGG_INLINE self_type& demultiply()
923
+ {
924
+ if (a < base_mask)
925
+ {
926
+ if (a == 0)
927
+ {
928
+ r = g = b = 0;
929
+ }
930
+ else
931
+ {
932
+ calc_type r_ = (calc_type(r) * base_mask) / a;
933
+ calc_type g_ = (calc_type(g) * base_mask) / a;
934
+ calc_type b_ = (calc_type(b) * base_mask) / a;
935
+ r = value_type((r_ > calc_type(base_mask)) ? calc_type(base_mask) : r_);
936
+ g = value_type((g_ > calc_type(base_mask)) ? calc_type(base_mask) : g_);
937
+ b = value_type((b_ > calc_type(base_mask)) ? calc_type(base_mask) : b_);
938
+ }
939
+ }
940
+ return *this;
941
+ }
942
+
943
+ //--------------------------------------------------------------------
944
+ AGG_INLINE self_type gradient(const self_type& c, double k) const
945
+ {
946
+ self_type ret;
947
+ calc_type ik = uround(k * base_mask);
948
+ ret.r = lerp(r, c.r, ik);
949
+ ret.g = lerp(g, c.g, ik);
950
+ ret.b = lerp(b, c.b, ik);
951
+ ret.a = lerp(a, c.a, ik);
952
+ return ret;
953
+ }
954
+
955
+ //--------------------------------------------------------------------
956
+ AGG_INLINE void add(const self_type& c, unsigned cover)
957
+ {
958
+ calc_type cr, cg, cb, ca;
959
+ if (cover == cover_mask)
960
+ {
961
+ if (c.a == base_mask)
962
+ {
963
+ *this = c;
964
+ return;
965
+ }
966
+ else
967
+ {
968
+ cr = r + c.r;
969
+ cg = g + c.g;
970
+ cb = b + c.b;
971
+ ca = a + c.a;
972
+ }
973
+ }
974
+ else
975
+ {
976
+ cr = r + mult_cover(c.r, cover);
977
+ cg = g + mult_cover(c.g, cover);
978
+ cb = b + mult_cover(c.b, cover);
979
+ ca = a + mult_cover(c.a, cover);
980
+ }
981
+ r = (value_type)((cr > calc_type(base_mask)) ? calc_type(base_mask) : cr);
982
+ g = (value_type)((cg > calc_type(base_mask)) ? calc_type(base_mask) : cg);
983
+ b = (value_type)((cb > calc_type(base_mask)) ? calc_type(base_mask) : cb);
984
+ a = (value_type)((ca > calc_type(base_mask)) ? calc_type(base_mask) : ca);
985
+ }
986
+
987
+ //--------------------------------------------------------------------
988
+ template<class GammaLUT>
989
+ AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
990
+ {
991
+ r = gamma.dir(r);
992
+ g = gamma.dir(g);
993
+ b = gamma.dir(b);
994
+ }
995
+
996
+ //--------------------------------------------------------------------
997
+ template<class GammaLUT>
998
+ AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
999
+ {
1000
+ r = gamma.inv(r);
1001
+ g = gamma.inv(g);
1002
+ b = gamma.inv(b);
1003
+ }
1004
+
1005
+ //--------------------------------------------------------------------
1006
+ static self_type no_color() { return self_type(0,0,0,0); }
1007
+
1008
+ //--------------------------------------------------------------------
1009
+ static self_type from_wavelength(double wl, double gamma = 1.0)
1010
+ {
1011
+ return self_type(rgba::from_wavelength(wl, gamma));
1012
+ }
1013
+ };
1014
+
1015
+
1016
+ //------------------------------------------------------rgba16_gamma_dir
1017
+ template<class GammaLUT>
1018
+ rgba16 rgba16_gamma_dir(rgba16 c, const GammaLUT& gamma)
1019
+ {
1020
+ return rgba16(gamma.dir(c.r), gamma.dir(c.g), gamma.dir(c.b), c.a);
1021
+ }
1022
+
1023
+ //------------------------------------------------------rgba16_gamma_inv
1024
+ template<class GammaLUT>
1025
+ rgba16 rgba16_gamma_inv(rgba16 c, const GammaLUT& gamma)
1026
+ {
1027
+ return rgba16(gamma.inv(c.r), gamma.inv(c.g), gamma.inv(c.b), c.a);
1028
+ }
1029
+
1030
+ //====================================================================rgba32
1031
+ struct rgba32
1032
+ {
1033
+ typedef float value_type;
1034
+ typedef double calc_type;
1035
+ typedef double long_type;
1036
+ typedef rgba32 self_type;
1037
+
1038
+ value_type r;
1039
+ value_type g;
1040
+ value_type b;
1041
+ value_type a;
1042
+
1043
+ //--------------------------------------------------------------------
1044
+ rgba32() {}
1045
+
1046
+ //--------------------------------------------------------------------
1047
+ rgba32(value_type r_, value_type g_, value_type b_, value_type a_= 1) :
1048
+ r(r_), g(g_), b(b_), a(a_) {}
1049
+
1050
+ //--------------------------------------------------------------------
1051
+ rgba32(const self_type& c, float a_) :
1052
+ r(c.r), g(c.g), b(c.b), a(a_) {}
1053
+
1054
+ //--------------------------------------------------------------------
1055
+ rgba32(const rgba& c) :
1056
+ r(value_type(c.r)), g(value_type(c.g)), b(value_type(c.b)), a(value_type(c.a)) {}
1057
+
1058
+ //--------------------------------------------------------------------
1059
+ rgba32(const rgba8& c) :
1060
+ r(value_type(c.r / 255.0)),
1061
+ g(value_type(c.g / 255.0)),
1062
+ b(value_type(c.b / 255.0)),
1063
+ a(value_type(c.a / 255.0)) {}
1064
+
1065
+ //--------------------------------------------------------------------
1066
+ rgba32(const srgba8& c) :
1067
+ r(sRGB_conv<value_type>::rgb_from_sRGB(c.r)),
1068
+ g(sRGB_conv<value_type>::rgb_from_sRGB(c.g)),
1069
+ b(sRGB_conv<value_type>::rgb_from_sRGB(c.b)),
1070
+ a(sRGB_conv<value_type>::alpha_from_sRGB(c.a)) {}
1071
+
1072
+ //--------------------------------------------------------------------
1073
+ rgba32(const rgba16& c) :
1074
+ r(value_type(c.r / 65535.0)),
1075
+ g(value_type(c.g / 65535.0)),
1076
+ b(value_type(c.b / 65535.0)),
1077
+ a(value_type(c.a / 65535.0)) {}
1078
+
1079
+ //--------------------------------------------------------------------
1080
+ operator rgba() const
1081
+ {
1082
+ return rgba(r, g, b, a);
1083
+ }
1084
+
1085
+ //--------------------------------------------------------------------
1086
+ operator rgba8() const
1087
+ {
1088
+ return rgba8(
1089
+ uround(r * 255.0),
1090
+ uround(g * 255.0),
1091
+ uround(b * 255.0),
1092
+ uround(a * 255.0));
1093
+ }
1094
+
1095
+ //--------------------------------------------------------------------
1096
+ operator srgba8() const
1097
+ {
1098
+ return srgba8(
1099
+ sRGB_conv<value_type>::rgb_to_sRGB(r),
1100
+ sRGB_conv<value_type>::rgb_to_sRGB(g),
1101
+ sRGB_conv<value_type>::rgb_to_sRGB(b),
1102
+ sRGB_conv<value_type>::alpha_to_sRGB(a));
1103
+ }
1104
+
1105
+ //--------------------------------------------------------------------
1106
+ operator rgba16() const
1107
+ {
1108
+ return rgba16(
1109
+ uround(r * 65535.0),
1110
+ uround(g * 65535.0),
1111
+ uround(b * 65535.0),
1112
+ uround(a * 65535.0));
1113
+ }
1114
+
1115
+ //--------------------------------------------------------------------
1116
+ static AGG_INLINE double to_double(value_type a)
1117
+ {
1118
+ return a;
1119
+ }
1120
+
1121
+ //--------------------------------------------------------------------
1122
+ static AGG_INLINE value_type from_double(double a)
1123
+ {
1124
+ return value_type(a);
1125
+ }
1126
+
1127
+ //--------------------------------------------------------------------
1128
+ static AGG_INLINE value_type empty_value()
1129
+ {
1130
+ return 0;
1131
+ }
1132
+
1133
+ //--------------------------------------------------------------------
1134
+ static AGG_INLINE value_type full_value()
1135
+ {
1136
+ return 1;
1137
+ }
1138
+
1139
+ //--------------------------------------------------------------------
1140
+ AGG_INLINE bool is_transparent() const
1141
+ {
1142
+ return a <= 0;
1143
+ }
1144
+
1145
+ //--------------------------------------------------------------------
1146
+ AGG_INLINE bool is_opaque() const
1147
+ {
1148
+ return a >= 1;
1149
+ }
1150
+
1151
+ //--------------------------------------------------------------------
1152
+ static AGG_INLINE value_type invert(value_type x)
1153
+ {
1154
+ return 1 - x;
1155
+ }
1156
+
1157
+ //--------------------------------------------------------------------
1158
+ static AGG_INLINE value_type multiply(value_type a, value_type b)
1159
+ {
1160
+ return value_type(a * b);
1161
+ }
1162
+
1163
+ //--------------------------------------------------------------------
1164
+ static AGG_INLINE value_type demultiply(value_type a, value_type b)
1165
+ {
1166
+ return (b == 0) ? 0 : value_type(a / b);
1167
+ }
1168
+
1169
+ //--------------------------------------------------------------------
1170
+ template<typename T>
1171
+ static AGG_INLINE T downscale(T a)
1172
+ {
1173
+ return a;
1174
+ }
1175
+
1176
+ //--------------------------------------------------------------------
1177
+ template<typename T>
1178
+ static AGG_INLINE T downshift(T a, unsigned n)
1179
+ {
1180
+ return n > 0 ? a / (1 << n) : a;
1181
+ }
1182
+
1183
+ //--------------------------------------------------------------------
1184
+ static AGG_INLINE value_type mult_cover(value_type a, cover_type b)
1185
+ {
1186
+ return value_type(a * b / cover_mask);
1187
+ }
1188
+
1189
+ //--------------------------------------------------------------------
1190
+ static AGG_INLINE cover_type scale_cover(cover_type a, value_type b)
1191
+ {
1192
+ return cover_type(uround(a * b));
1193
+ }
1194
+
1195
+ //--------------------------------------------------------------------
1196
+ // Interpolate p to q by a, assuming q is premultiplied by a.
1197
+ static AGG_INLINE value_type prelerp(value_type p, value_type q, value_type a)
1198
+ {
1199
+ return (1 - a) * p + q; // more accurate than "p + q - p * a"
1200
+ }
1201
+
1202
+ //--------------------------------------------------------------------
1203
+ // Interpolate p to q by a.
1204
+ static AGG_INLINE value_type lerp(value_type p, value_type q, value_type a)
1205
+ {
1206
+ // The form "p + a * (q - p)" avoids a multiplication, but may produce an
1207
+ // inaccurate result. For example, "p + (q - p)" may not be exactly equal
1208
+ // to q. Therefore, stick to the basic expression, which at least produces
1209
+ // the correct result at either extreme.
1210
+ return (1 - a) * p + a * q;
1211
+ }
1212
+
1213
+ //--------------------------------------------------------------------
1214
+ self_type& clear()
1215
+ {
1216
+ r = g = b = a = 0;
1217
+ return *this;
1218
+ }
1219
+
1220
+ //--------------------------------------------------------------------
1221
+ self_type& transparent()
1222
+ {
1223
+ a = 0;
1224
+ return *this;
1225
+ }
1226
+
1227
+ //--------------------------------------------------------------------
1228
+ AGG_INLINE self_type& opacity(double a_)
1229
+ {
1230
+ if (a_ < 0) a = 0;
1231
+ else if (a_ > 1) a = 1;
1232
+ else a = value_type(a_);
1233
+ return *this;
1234
+ }
1235
+
1236
+ //--------------------------------------------------------------------
1237
+ double opacity() const
1238
+ {
1239
+ return a;
1240
+ }
1241
+
1242
+ //--------------------------------------------------------------------
1243
+ AGG_INLINE self_type& premultiply()
1244
+ {
1245
+ if (a < 1)
1246
+ {
1247
+ if (a <= 0)
1248
+ {
1249
+ r = g = b = 0;
1250
+ }
1251
+ else
1252
+ {
1253
+ r *= a;
1254
+ g *= a;
1255
+ b *= a;
1256
+ }
1257
+ }
1258
+ return *this;
1259
+ }
1260
+
1261
+ //--------------------------------------------------------------------
1262
+ AGG_INLINE self_type& demultiply()
1263
+ {
1264
+ if (a < 1)
1265
+ {
1266
+ if (a <= 0)
1267
+ {
1268
+ r = g = b = 0;
1269
+ }
1270
+ else
1271
+ {
1272
+ r /= a;
1273
+ g /= a;
1274
+ b /= a;
1275
+ }
1276
+ }
1277
+ return *this;
1278
+ }
1279
+
1280
+ //--------------------------------------------------------------------
1281
+ AGG_INLINE self_type gradient(const self_type& c, double k) const
1282
+ {
1283
+ self_type ret;
1284
+ ret.r = value_type(r + (c.r - r) * k);
1285
+ ret.g = value_type(g + (c.g - g) * k);
1286
+ ret.b = value_type(b + (c.b - b) * k);
1287
+ ret.a = value_type(a + (c.a - a) * k);
1288
+ return ret;
1289
+ }
1290
+
1291
+ //--------------------------------------------------------------------
1292
+ AGG_INLINE void add(const self_type& c, unsigned cover)
1293
+ {
1294
+ if (cover == cover_mask)
1295
+ {
1296
+ if (c.is_opaque())
1297
+ {
1298
+ *this = c;
1299
+ return;
1300
+ }
1301
+ else
1302
+ {
1303
+ r += c.r;
1304
+ g += c.g;
1305
+ b += c.b;
1306
+ a += c.a;
1307
+ }
1308
+ }
1309
+ else
1310
+ {
1311
+ r += mult_cover(c.r, cover);
1312
+ g += mult_cover(c.g, cover);
1313
+ b += mult_cover(c.b, cover);
1314
+ a += mult_cover(c.a, cover);
1315
+ }
1316
+ if (a > 1) a = 1;
1317
+ if (r > a) r = a;
1318
+ if (g > a) g = a;
1319
+ if (b > a) b = a;
1320
+ }
1321
+
1322
+ //--------------------------------------------------------------------
1323
+ template<class GammaLUT>
1324
+ AGG_INLINE void apply_gamma_dir(const GammaLUT& gamma)
1325
+ {
1326
+ r = gamma.dir(r);
1327
+ g = gamma.dir(g);
1328
+ b = gamma.dir(b);
1329
+ }
1330
+
1331
+ //--------------------------------------------------------------------
1332
+ template<class GammaLUT>
1333
+ AGG_INLINE void apply_gamma_inv(const GammaLUT& gamma)
1334
+ {
1335
+ r = gamma.inv(r);
1336
+ g = gamma.inv(g);
1337
+ b = gamma.inv(b);
1338
+ }
1339
+
1340
+ //--------------------------------------------------------------------
1341
+ static self_type no_color() { return self_type(0,0,0,0); }
1342
+
1343
+ //--------------------------------------------------------------------
1344
+ static self_type from_wavelength(double wl, double gamma = 1)
1345
+ {
1346
+ return self_type(rgba::from_wavelength(wl, gamma));
1347
+ }
1348
+ };
1349
+ }
1350
+
1351
+
1352
+
1353
+ #endif
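Note on the fixed-point arithmetic above: rgba8T::multiply computes a rounded a*b/255 without a division, via t = a*b + base_MSB followed by ((t >> base_shift) + t) >> base_shift. The following is a minimal standalone sketch (not part of the bundled sources) that checks this identity against exact rounded division by 255 for every 8-bit pair:

// Standalone sketch: verify the shift/add form used by rgba8T::multiply above.
#include <cassert>
#include <cstdint>
#include <cstdio>

static std::uint8_t mul255(std::uint8_t a, std::uint8_t b)
{
    std::uint32_t t = std::uint32_t(a) * b + 0x80;   // 0x80 plays the role of base_MSB (rounding)
    return std::uint8_t(((t >> 8) + t) >> 8);        // fold the high byte back in
}

int main()
{
    for (unsigned a = 0; a < 256; ++a)
        for (unsigned b = 0; b < 256; ++b)
            assert(mul255(std::uint8_t(a), std::uint8_t(b)) == (a * b + 127) / 255);
    std::puts("rounded divide-by-255 identity holds for all 8-bit pairs");
    return 0;
}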
data/bundled_deps/agg/agg/agg_config.h ADDED
@@ -0,0 +1,44 @@
1
+ #ifndef AGG_CONFIG_INCLUDED
2
+ #define AGG_CONFIG_INCLUDED
3
+
4
+ // This file can be used to redefine certain data types.
5
+
6
+ //---------------------------------------
7
+ // 1. Default basic types such as:
8
+ //
9
+ // AGG_INT8
10
+ // AGG_INT8U
11
+ // AGG_INT16
12
+ // AGG_INT16U
13
+ // AGG_INT32
14
+ // AGG_INT32U
15
+ // AGG_INT64
16
+ // AGG_INT64U
17
+ //
18
+ // Just replace this file with new defines if necessary.
19
+ // For example, if your compiler doesn't have a 64 bit integer type
20
+ // you can still use AGG if you define the following:
21
+ //
22
+ // #define AGG_INT64 int
23
+ // #define AGG_INT64U unsigned
24
+ //
25
+ // It will result in overflow in 16 bit-per-component image/pattern resampling
26
+ // but it won't result in any crash and the rest of the library will remain
27
+ // fully functional.
28
+
29
+
30
+ //---------------------------------------
31
+ // 2. Default rendering_buffer type. Can be:
32
+ //
33
+ // Provides faster access for massive pixel operations,
34
+ // such as blur, image filtering:
35
+ // #define AGG_RENDERING_BUFFER row_ptr_cache<int8u>
36
+ //
37
+ // Provides cheaper creation and destruction (no mem allocs):
38
+ // #define AGG_RENDERING_BUFFER row_accessor<int8u>
39
+ //
40
+ // You can still use both of them simultaneously in your applications.
41
+ // This #define is used only for default rendering_buffer type,
42
+ // in short hand typedefs like pixfmt_rgba32.
43
+
44
+ #endif
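The header above deliberately defines nothing; its comments only describe what may be overridden. A hypothetical replacement, sketched from those comments, for a toolchain without a native 64-bit integer type might look like this (the overflow caveat is the one stated in the comments above):

// Hypothetical agg_config.h replacement, following the comments above.
// With these fallbacks, 16-bit-per-component image/pattern resampling can
// overflow, but the rest of the library keeps working.
#ifndef AGG_CONFIG_INCLUDED
#define AGG_CONFIG_INCLUDED

#define AGG_INT64  int
#define AGG_INT64U unsigned

// Optional: make the cheaper-to-create buffer the default rendering_buffer.
// #define AGG_RENDERING_BUFFER row_accessor<int8u>

#endif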
data/bundled_deps/agg/agg/agg_conv_transform.h ADDED
@@ -0,0 +1,68 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ //
16
+ // class conv_transform
17
+ //
18
+ //----------------------------------------------------------------------------
19
+ #ifndef AGG_CONV_TRANSFORM_INCLUDED
20
+ #define AGG_CONV_TRANSFORM_INCLUDED
21
+
22
+ #include "agg_basics.h"
23
+ #include "agg_trans_affine.h"
24
+
25
+ namespace agg
26
+ {
27
+
28
+ //----------------------------------------------------------conv_transform
29
+ template<class VertexSource, class Transformer=trans_affine> class conv_transform
30
+ {
31
+ public:
32
+ conv_transform(VertexSource& source, Transformer& tr) :
33
+ m_source(&source), m_trans(&tr) {}
34
+ void attach(VertexSource& source) { m_source = &source; }
35
+
36
+ void rewind(unsigned path_id)
37
+ {
38
+ m_source->rewind(path_id);
39
+ }
40
+
41
+ unsigned vertex(double* x, double* y)
42
+ {
43
+ unsigned cmd = m_source->vertex(x, y);
44
+ if(is_vertex(cmd))
45
+ {
46
+ m_trans->transform(x, y);
47
+ }
48
+ return cmd;
49
+ }
50
+
51
+ void transformer(Transformer& tr)
52
+ {
53
+ m_trans = &tr;
54
+ }
55
+
56
+ private:
57
+ conv_transform(const conv_transform<VertexSource>&);
58
+ const conv_transform<VertexSource>&
59
+ operator = (const conv_transform<VertexSource>&);
60
+
61
+ VertexSource* m_source;
62
+ Transformer* m_trans;
63
+ };
64
+
65
+
66
+ }
67
+
68
+ #endif
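A minimal usage sketch for conv_transform (not part of the bundled sources; it assumes the sibling headers agg_basics.h, agg_path_storage.h and agg_trans_affine.h from this same directory): a two-vertex path is piped through an affine translation and read back.

// Sketch only: feed a path_storage through conv_transform with a translation.
#include "agg_basics.h"
#include "agg_path_storage.h"
#include "agg_trans_affine.h"
#include "agg_conv_transform.h"
#include <cstdio>

int main()
{
    agg::path_storage path;
    path.move_to(0.0, 0.0);
    path.line_to(10.0, 0.0);

    agg::trans_affine mtx = agg::trans_affine_translation(5.0, 5.0);
    agg::conv_transform<agg::path_storage> shifted(path, mtx);

    shifted.rewind(0);
    double x, y;
    unsigned cmd;
    while (!agg::is_stop(cmd = shifted.vertex(&x, &y)))
        std::printf("cmd=%u  (%g, %g)\n", cmd, x, y);   // vertices come out shifted by (5, 5)
    return 0;
}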
data/bundled_deps/agg/agg/agg_gamma_functions.h ADDED
@@ -0,0 +1,132 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+
16
+ #ifndef AGG_GAMMA_FUNCTIONS_INCLUDED
17
+ #define AGG_GAMMA_FUNCTIONS_INCLUDED
18
+
19
+ #include <math.h>
20
+ #include "agg_basics.h"
21
+
22
+ namespace agg
23
+ {
24
+ //===============================================================gamma_none
25
+ struct gamma_none
26
+ {
27
+ double operator()(double x) const { return x; }
28
+ };
29
+
30
+
31
+ //==============================================================gamma_power
32
+ class gamma_power
33
+ {
34
+ public:
35
+ gamma_power() : m_gamma(1.0) {}
36
+ gamma_power(double g) : m_gamma(g) {}
37
+
38
+ void gamma(double g) { m_gamma = g; }
39
+ double gamma() const { return m_gamma; }
40
+
41
+ double operator() (double x) const
42
+ {
43
+ return pow(x, m_gamma);
44
+ }
45
+
46
+ private:
47
+ double m_gamma;
48
+ };
49
+
50
+
51
+ //==========================================================gamma_threshold
52
+ class gamma_threshold
53
+ {
54
+ public:
55
+ gamma_threshold() : m_threshold(0.5) {}
56
+ gamma_threshold(double t) : m_threshold(t) {}
57
+
58
+ void threshold(double t) { m_threshold = t; }
59
+ double threshold() const { return m_threshold; }
60
+
61
+ double operator() (double x) const
62
+ {
63
+ return (x < m_threshold) ? 0.0 : 1.0;
64
+ }
65
+
66
+ private:
67
+ double m_threshold;
68
+ };
69
+
70
+
71
+ //============================================================gamma_linear
72
+ class gamma_linear
73
+ {
74
+ public:
75
+ gamma_linear() : m_start(0.0), m_end(1.0) {}
76
+ gamma_linear(double s, double e) : m_start(s), m_end(e) {}
77
+
78
+ void set(double s, double e) { m_start = s; m_end = e; }
79
+ void start(double s) { m_start = s; }
80
+ void end(double e) { m_end = e; }
81
+ double start() const { return m_start; }
82
+ double end() const { return m_end; }
83
+
84
+ double operator() (double x) const
85
+ {
86
+ if(x < m_start) return 0.0;
87
+ if(x > m_end) return 1.0;
88
+ return (x - m_start) / (m_end - m_start);
89
+ }
90
+
91
+ private:
92
+ double m_start;
93
+ double m_end;
94
+ };
95
+
96
+
97
+ //==========================================================gamma_multiply
98
+ class gamma_multiply
99
+ {
100
+ public:
101
+ gamma_multiply() : m_mul(1.0) {}
102
+ gamma_multiply(double v) : m_mul(v) {}
103
+
104
+ void value(double v) { m_mul = v; }
105
+ double value() const { return m_mul; }
106
+
107
+ double operator() (double x) const
108
+ {
109
+ double y = x * m_mul;
110
+ if(y > 1.0) y = 1.0;
111
+ return y;
112
+ }
113
+
114
+ private:
115
+ double m_mul;
116
+ };
117
+
118
+ inline double sRGB_to_linear(double x)
119
+ {
120
+ return (x <= 0.04045) ? (x / 12.92) : pow((x + 0.055) / (1.055), 2.4);
121
+ }
122
+
123
+ inline double linear_to_sRGB(double x)
124
+ {
125
+ return (x <= 0.0031308) ? (x * 12.92) : (1.055 * pow(x, 1 / 2.4) - 0.055);
126
+ }
127
+ }
128
+
129
+ #endif
130
+
131
+
132
+
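The two sRGB helpers above are the standard sRGB transfer functions and are inverses of each other. A standalone sketch (local copies of the same formulas, not the header itself) that checks the round trip numerically; note the published constants do not meet exactly at the 0.04045 knee, so a tiny seam on the order of 1e-5 is expected there:

// Standalone round-trip check for the sRGB transfer functions defined above.
#include <algorithm>
#include <cmath>
#include <cstdio>

static double sRGB_to_linear(double x)
{ return (x <= 0.04045) ? (x / 12.92) : std::pow((x + 0.055) / 1.055, 2.4); }

static double linear_to_sRGB(double x)
{ return (x <= 0.0031308) ? (x * 12.92) : (1.055 * std::pow(x, 1 / 2.4) - 0.055); }

int main()
{
    double max_err = 0.0;
    for (int i = 0; i <= 1000; ++i) {
        double s = i / 1000.0;
        max_err = std::max(max_err, std::fabs(linear_to_sRGB(sRGB_to_linear(s)) - s));
    }
    std::printf("max |sRGB -> linear -> sRGB| error: %g\n", max_err);  // small; largest near the knee
    return 0;
}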
data/bundled_deps/agg/agg/agg_gamma_lut.h ADDED
@@ -0,0 +1,300 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+
16
+ #ifndef AGG_GAMMA_LUT_INCLUDED
17
+ #define AGG_GAMMA_LUT_INCLUDED
18
+
19
+ #include <math.h>
20
+ #include "agg_basics.h"
21
+ #include "agg_gamma_functions.h"
22
+
23
+ namespace agg
24
+ {
25
+ template<class LoResT=int8u,
26
+ class HiResT=int8u,
27
+ unsigned GammaShift=8,
28
+ unsigned HiResShift=8> class gamma_lut
29
+ {
30
+ public:
31
+ typedef gamma_lut<LoResT, HiResT, GammaShift, HiResShift> self_type;
32
+
33
+ enum gamma_scale_e
34
+ {
35
+ gamma_shift = GammaShift,
36
+ gamma_size = 1 << gamma_shift,
37
+ gamma_mask = gamma_size - 1
38
+ };
39
+
40
+ enum hi_res_scale_e
41
+ {
42
+ hi_res_shift = HiResShift,
43
+ hi_res_size = 1 << hi_res_shift,
44
+ hi_res_mask = hi_res_size - 1
45
+ };
46
+
47
+ ~gamma_lut()
48
+ {
49
+ pod_allocator<LoResT>::deallocate(m_inv_gamma, hi_res_size);
50
+ pod_allocator<HiResT>::deallocate(m_dir_gamma, gamma_size);
51
+ }
52
+
53
+ gamma_lut() :
54
+ m_gamma(1.0),
55
+ m_dir_gamma(pod_allocator<HiResT>::allocate(gamma_size)),
56
+ m_inv_gamma(pod_allocator<LoResT>::allocate(hi_res_size))
57
+ {
58
+ unsigned i;
59
+ for(i = 0; i < gamma_size; i++)
60
+ {
61
+ m_dir_gamma[i] = HiResT(i << (hi_res_shift - gamma_shift));
62
+ }
63
+
64
+ for(i = 0; i < hi_res_size; i++)
65
+ {
66
+ m_inv_gamma[i] = LoResT(i >> (hi_res_shift - gamma_shift));
67
+ }
68
+ }
69
+
70
+ gamma_lut(double g) :
71
+ m_gamma(1.0),
72
+ m_dir_gamma(pod_allocator<HiResT>::allocate(gamma_size)),
73
+ m_inv_gamma(pod_allocator<LoResT>::allocate(hi_res_size))
74
+ {
75
+ gamma(g);
76
+ }
77
+
78
+ void gamma(double g)
79
+ {
80
+ m_gamma = g;
81
+
82
+ unsigned i;
83
+ for(i = 0; i < gamma_size; i++)
84
+ {
85
+ m_dir_gamma[i] = (HiResT)
86
+ uround(pow(i / double(gamma_mask), m_gamma) * double(hi_res_mask));
87
+ }
88
+
89
+ double inv_g = 1.0 / g;
90
+ for(i = 0; i < hi_res_size; i++)
91
+ {
92
+ m_inv_gamma[i] = (LoResT)
93
+ uround(pow(i / double(hi_res_mask), inv_g) * double(gamma_mask));
94
+ }
95
+ }
96
+
97
+ double gamma() const
98
+ {
99
+ return m_gamma;
100
+ }
101
+
102
+ HiResT dir(LoResT v) const
103
+ {
104
+ return m_dir_gamma[unsigned(v)];
105
+ }
106
+
107
+ LoResT inv(HiResT v) const
108
+ {
109
+ return m_inv_gamma[unsigned(v)];
110
+ }
111
+
112
+ private:
113
+ gamma_lut(const self_type&);
114
+ const self_type& operator = (const self_type&);
115
+
116
+ double m_gamma;
117
+ HiResT* m_dir_gamma;
118
+ LoResT* m_inv_gamma;
119
+ };
120
+
121
+ //
122
+ // sRGB support classes
123
+ //
124
+
125
+ // sRGB_lut - implements sRGB conversion for the various types.
126
+ // Base template is undefined, specializations are provided below.
127
+ template<class LinearType>
128
+ class sRGB_lut;
129
+
130
+ template<>
131
+ class sRGB_lut<float>
132
+ {
133
+ public:
134
+ sRGB_lut()
135
+ {
136
+ // Generate lookup tables.
137
+ for (int i = 0; i <= 255; ++i)
138
+ {
139
+ m_dir_table[i] = float(sRGB_to_linear(i / 255.0));
140
+ }
141
+ for (int i = 0; i <= 65535; ++i)
142
+ {
143
+ m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 65535.0));
144
+ }
145
+ }
146
+
147
+ float dir(int8u v) const
148
+ {
149
+ return m_dir_table[v];
150
+ }
151
+
152
+ int8u inv(float v) const
153
+ {
154
+ return m_inv_table[int16u(0.5 + v * 65535)];
155
+ }
156
+
157
+ private:
158
+ float m_dir_table[256];
159
+ int8u m_inv_table[65536];
160
+ };
161
+
162
+ template<>
163
+ class sRGB_lut<int16u>
164
+ {
165
+ public:
166
+ sRGB_lut()
167
+ {
168
+ // Generate lookup tables.
169
+ for (int i = 0; i <= 255; ++i)
170
+ {
171
+ m_dir_table[i] = uround(65535.0 * sRGB_to_linear(i / 255.0));
172
+ }
173
+ for (int i = 0; i <= 65535; ++i)
174
+ {
175
+ m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 65535.0));
176
+ }
177
+ }
178
+
179
+ int16u dir(int8u v) const
180
+ {
181
+ return m_dir_table[v];
182
+ }
183
+
184
+ int8u inv(int16u v) const
185
+ {
186
+ return m_inv_table[v];
187
+ }
188
+
189
+ private:
190
+ int16u m_dir_table[256];
191
+ int8u m_inv_table[65536];
192
+ };
193
+
194
+ template<>
195
+ class sRGB_lut<int8u>
196
+ {
197
+ public:
198
+ sRGB_lut()
199
+ {
200
+ // Generate lookup tables.
201
+ for (int i = 0; i <= 255; ++i)
202
+ {
203
+ m_dir_table[i] = uround(255.0 * sRGB_to_linear(i / 255.0));
204
+ m_inv_table[i] = uround(255.0 * linear_to_sRGB(i / 255.0));
205
+ }
206
+ }
207
+
208
+ int8u dir(int8u v) const
209
+ {
210
+ return m_dir_table[v];
211
+ }
212
+
213
+ int8u inv(int8u v) const
214
+ {
215
+ return m_inv_table[v];
216
+ }
217
+
218
+ private:
219
+ int8u m_dir_table[256];
220
+ int8u m_inv_table[256];
221
+ };
222
+
223
+ // Common base class for sRGB_conv objects. Defines an internal
224
+ // sRGB_lut object so that users don't have to.
225
+ template<class T>
226
+ class sRGB_conv_base
227
+ {
228
+ public:
229
+ static T rgb_from_sRGB(int8u x)
230
+ {
231
+ return lut.dir(x);
232
+ }
233
+
234
+ static int8u rgb_to_sRGB(T x)
235
+ {
236
+ return lut.inv(x);
237
+ }
238
+
239
+ private:
240
+ static sRGB_lut<T> lut;
241
+ };
242
+
243
+ // Definition of sRGB_conv_base::lut. Due to the fact that this is a template,
244
+ // we don't need to place the definition in a cpp file. Hurrah.
245
+ template<class T>
246
+ sRGB_lut<T> sRGB_conv_base<T>::lut;
247
+
248
+ // Wrapper for sRGB-linear conversion.
249
+ // Base template is undefined, specializations are provided below.
250
+ template<class T>
251
+ class sRGB_conv;
252
+
253
+ template<>
254
+ class sRGB_conv<float> : public sRGB_conv_base<float>
255
+ {
256
+ public:
257
+ static float alpha_from_sRGB(int8u x)
258
+ {
259
+ static const double y = 1 / 255.0;
260
+ return float(x * y);
261
+ }
262
+
263
+ static int8u alpha_to_sRGB(float x)
264
+ {
265
+ return int8u(0.5 + x * 255);
266
+ }
267
+ };
268
+
269
+ template<>
270
+ class sRGB_conv<int16u> : public sRGB_conv_base<int16u>
271
+ {
272
+ public:
273
+ static int16u alpha_from_sRGB(int8u x)
274
+ {
275
+ return (x << 8) | x;
276
+ }
277
+
278
+ static int8u alpha_to_sRGB(int16u x)
279
+ {
280
+ return x >> 8;
281
+ }
282
+ };
283
+
284
+ template<>
285
+ class sRGB_conv<int8u> : public sRGB_conv_base<int8u>
286
+ {
287
+ public:
288
+ static int8u alpha_from_sRGB(int8u x)
289
+ {
290
+ return x;
291
+ }
292
+
293
+ static int8u alpha_to_sRGB(int8u x)
294
+ {
295
+ return x;
296
+ }
297
+ };
298
+ }
299
+
300
+ #endif
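A minimal usage sketch for gamma_lut (not part of the bundled sources; it assumes only this header and the agg_basics.h it includes): build an 8-bit gamma table and map a value through it and back.

// Sketch only: default gamma_lut<> is 8-bit in, 8-bit out.
#include "agg_gamma_lut.h"
#include <cstdio>

int main()
{
    agg::gamma_lut<> lut(2.2);          // dir() applies x^2.2, inv() applies the reciprocal power
    agg::int8u v    = 100;
    agg::int8u dark = lut.dir(v);        // forward (direct) table lookup
    agg::int8u back = lut.inv(dark);     // approximate inverse, quantized to 8 bits
    std::printf("%u -> %u -> %u\n", unsigned(v), unsigned(dark), unsigned(back));
    return 0;
}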
data/bundled_deps/agg/agg/agg_math.h ADDED
@@ -0,0 +1,437 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ // Bessel function (besj) was adapted for use in AGG library by Andy Wilk
16
+ // Contact: [email protected]
17
+ //----------------------------------------------------------------------------
18
+
19
+ #ifndef AGG_MATH_INCLUDED
20
+ #define AGG_MATH_INCLUDED
21
+
22
+ #include <math.h>
23
+ #include "agg_basics.h"
24
+
25
+ namespace agg
26
+ {
27
+
28
+ //------------------------------------------------------vertex_dist_epsilon
29
+ // Coinciding points maximal distance (Epsilon)
30
+ const double vertex_dist_epsilon = 1e-14;
31
+
32
+ //-----------------------------------------------------intersection_epsilon
33
+ // See calc_intersection
34
+ const double intersection_epsilon = 1.0e-30;
35
+
36
+ //------------------------------------------------------------cross_product
37
+ AGG_INLINE double cross_product(double x1, double y1,
38
+ double x2, double y2,
39
+ double x, double y)
40
+ {
41
+ return (x - x2) * (y2 - y1) - (y - y2) * (x2 - x1);
42
+ }
43
+
44
+ //--------------------------------------------------------point_in_triangle
45
+ AGG_INLINE bool point_in_triangle(double x1, double y1,
46
+ double x2, double y2,
47
+ double x3, double y3,
48
+ double x, double y)
49
+ {
50
+ bool cp1 = cross_product(x1, y1, x2, y2, x, y) < 0.0;
51
+ bool cp2 = cross_product(x2, y2, x3, y3, x, y) < 0.0;
52
+ bool cp3 = cross_product(x3, y3, x1, y1, x, y) < 0.0;
53
+ return cp1 == cp2 && cp2 == cp3 && cp3 == cp1;
54
+ }
55
+
56
+ //-----------------------------------------------------------calc_distance
57
+ AGG_INLINE double calc_distance(double x1, double y1, double x2, double y2)
58
+ {
59
+ double dx = x2-x1;
60
+ double dy = y2-y1;
61
+ return sqrt(dx * dx + dy * dy);
62
+ }
63
+
64
+ //--------------------------------------------------------calc_sq_distance
65
+ AGG_INLINE double calc_sq_distance(double x1, double y1, double x2, double y2)
66
+ {
67
+ double dx = x2-x1;
68
+ double dy = y2-y1;
69
+ return dx * dx + dy * dy;
70
+ }
71
+
72
+ //------------------------------------------------calc_line_point_distance
73
+ AGG_INLINE double calc_line_point_distance(double x1, double y1,
74
+ double x2, double y2,
75
+ double x, double y)
76
+ {
77
+ double dx = x2-x1;
78
+ double dy = y2-y1;
79
+ double d = sqrt(dx * dx + dy * dy);
80
+ if(d < vertex_dist_epsilon)
81
+ {
82
+ return calc_distance(x1, y1, x, y);
83
+ }
84
+ return ((x - x2) * dy - (y - y2) * dx) / d;
85
+ }
86
+
87
+ //-------------------------------------------------------calc_line_point_u
88
+ AGG_INLINE double calc_segment_point_u(double x1, double y1,
89
+ double x2, double y2,
90
+ double x, double y)
91
+ {
92
+ double dx = x2 - x1;
93
+ double dy = y2 - y1;
94
+
95
+ if(dx == 0 && dy == 0)
96
+ {
97
+ return 0;
98
+ }
99
+
100
+ double pdx = x - x1;
101
+ double pdy = y - y1;
102
+
103
+ return (pdx * dx + pdy * dy) / (dx * dx + dy * dy);
104
+ }
105
+
106
+ //---------------------------------------------calc_line_point_sq_distance
107
+ AGG_INLINE double calc_segment_point_sq_distance(double x1, double y1,
108
+ double x2, double y2,
109
+ double x, double y,
110
+ double u)
111
+ {
112
+ if(u <= 0)
113
+ {
114
+ return calc_sq_distance(x, y, x1, y1);
115
+ }
116
+ else
117
+ if(u >= 1)
118
+ {
119
+ return calc_sq_distance(x, y, x2, y2);
120
+ }
121
+ return calc_sq_distance(x, y, x1 + u * (x2 - x1), y1 + u * (y2 - y1));
122
+ }
123
+
124
+ //---------------------------------------------calc_line_point_sq_distance
125
+ AGG_INLINE double calc_segment_point_sq_distance(double x1, double y1,
126
+ double x2, double y2,
127
+ double x, double y)
128
+ {
129
+ return
130
+ calc_segment_point_sq_distance(
131
+ x1, y1, x2, y2, x, y,
132
+ calc_segment_point_u(x1, y1, x2, y2, x, y));
133
+ }
134
+
135
+ //-------------------------------------------------------calc_intersection
136
+ AGG_INLINE bool calc_intersection(double ax, double ay, double bx, double by,
137
+ double cx, double cy, double dx, double dy,
138
+ double* x, double* y)
139
+ {
140
+ double num = (ay-cy) * (dx-cx) - (ax-cx) * (dy-cy);
141
+ double den = (bx-ax) * (dy-cy) - (by-ay) * (dx-cx);
142
+ if(fabs(den) < intersection_epsilon) return false;
143
+ double r = num / den;
144
+ *x = ax + r * (bx-ax);
145
+ *y = ay + r * (by-ay);
146
+ return true;
147
+ }
148
+
149
+ //-----------------------------------------------------intersection_exists
150
+ AGG_INLINE bool intersection_exists(double x1, double y1, double x2, double y2,
151
+ double x3, double y3, double x4, double y4)
152
+ {
153
+ // It's less expensive but you can't control the
154
+ // boundary conditions: Less or LessEqual
155
+ double dx1 = x2 - x1;
156
+ double dy1 = y2 - y1;
157
+ double dx2 = x4 - x3;
158
+ double dy2 = y4 - y3;
159
+ return ((x3 - x2) * dy1 - (y3 - y2) * dx1 < 0.0) !=
160
+ ((x4 - x2) * dy1 - (y4 - y2) * dx1 < 0.0) &&
161
+ ((x1 - x4) * dy2 - (y1 - y4) * dx2 < 0.0) !=
162
+ ((x2 - x4) * dy2 - (y2 - y4) * dx2 < 0.0);
163
+
164
+ // It is more expensive but more flexible
165
+ // in terms of boundary conditions.
166
+ //--------------------
167
+ //double den = (x2-x1) * (y4-y3) - (y2-y1) * (x4-x3);
168
+ //if(fabs(den) < intersection_epsilon) return false;
169
+ //double nom1 = (x4-x3) * (y1-y3) - (y4-y3) * (x1-x3);
170
+ //double nom2 = (x2-x1) * (y1-y3) - (y2-y1) * (x1-x3);
171
+ //double ua = nom1 / den;
172
+ //double ub = nom2 / den;
173
+ //return ua >= 0.0 && ua <= 1.0 && ub >= 0.0 && ub <= 1.0;
174
+ }
175
+
176
+ //--------------------------------------------------------calc_orthogonal
177
+ AGG_INLINE void calc_orthogonal(double thickness,
178
+ double x1, double y1,
179
+ double x2, double y2,
180
+ double* x, double* y)
181
+ {
182
+ double dx = x2 - x1;
183
+ double dy = y2 - y1;
184
+ double d = sqrt(dx*dx + dy*dy);
185
+ *x = thickness * dy / d;
186
+ *y = -thickness * dx / d;
187
+ }
188
+
189
+ //--------------------------------------------------------dilate_triangle
190
+ AGG_INLINE void dilate_triangle(double x1, double y1,
191
+ double x2, double y2,
192
+ double x3, double y3,
193
+ double *x, double* y,
194
+ double d)
195
+ {
196
+ double dx1=0.0;
197
+ double dy1=0.0;
198
+ double dx2=0.0;
199
+ double dy2=0.0;
200
+ double dx3=0.0;
201
+ double dy3=0.0;
202
+ double loc = cross_product(x1, y1, x2, y2, x3, y3);
203
+ if(fabs(loc) > intersection_epsilon)
204
+ {
205
+ if(cross_product(x1, y1, x2, y2, x3, y3) > 0.0)
206
+ {
207
+ d = -d;
208
+ }
209
+ calc_orthogonal(d, x1, y1, x2, y2, &dx1, &dy1);
210
+ calc_orthogonal(d, x2, y2, x3, y3, &dx2, &dy2);
211
+ calc_orthogonal(d, x3, y3, x1, y1, &dx3, &dy3);
212
+ }
213
+ *x++ = x1 + dx1; *y++ = y1 + dy1;
214
+ *x++ = x2 + dx1; *y++ = y2 + dy1;
215
+ *x++ = x2 + dx2; *y++ = y2 + dy2;
216
+ *x++ = x3 + dx2; *y++ = y3 + dy2;
217
+ *x++ = x3 + dx3; *y++ = y3 + dy3;
218
+ *x++ = x1 + dx3; *y++ = y1 + dy3;
219
+ }
220
+
221
+ //------------------------------------------------------calc_triangle_area
222
+ AGG_INLINE double calc_triangle_area(double x1, double y1,
223
+ double x2, double y2,
224
+ double x3, double y3)
225
+ {
226
+ return (x1*y2 - x2*y1 + x2*y3 - x3*y2 + x3*y1 - x1*y3) * 0.5;
227
+ }
228
+
229
+ //-------------------------------------------------------calc_polygon_area
230
+ template<class Storage> double calc_polygon_area(const Storage& st)
231
+ {
232
+ unsigned i;
233
+ double sum = 0.0;
234
+ double x = st[0].x;
235
+ double y = st[0].y;
236
+ double xs = x;
237
+ double ys = y;
238
+
239
+ for(i = 1; i < st.size(); i++)
240
+ {
241
+ const typename Storage::value_type& v = st[i];
242
+ sum += x * v.y - y * v.x;
243
+ x = v.x;
244
+ y = v.y;
245
+ }
246
+ return (sum + x * ys - y * xs) * 0.5;
247
+ }
248
+
249
+ //------------------------------------------------------------------------
250
+ // Tables for fast sqrt
251
+ extern int16u g_sqrt_table[1024];
252
+ extern int8 g_elder_bit_table[256];
253
+
254
+
255
+ //---------------------------------------------------------------fast_sqrt
256
+ //Fast integer Sqrt - really fast: no cycles, divisions or multiplications
257
+ #if defined(_MSC_VER)
258
+ #pragma warning(push)
259
+ #pragma warning(disable : 4035) //Disable warning "no return value"
260
+ #endif
261
+ AGG_INLINE unsigned fast_sqrt(unsigned val)
262
+ {
263
+ #if defined(_M_IX86) && defined(_MSC_VER) && !defined(AGG_NO_ASM)
264
+ //For Ix86 family processors this assembler code is used.
265
+ //The key command here is bsr - determination the number of the most
266
+ //significant bit of the value. For other processors
267
+ //(and maybe compilers) the pure C "#else" section is used.
268
+ __asm
269
+ {
270
+ mov ebx, val
271
+ mov edx, 11
272
+ bsr ecx, ebx
273
+ sub ecx, 9
274
+ jle less_than_9_bits
275
+ shr ecx, 1
276
+ adc ecx, 0
277
+ sub edx, ecx
278
+ shl ecx, 1
279
+ shr ebx, cl
280
+ less_than_9_bits:
281
+ xor eax, eax
282
+ mov ax, g_sqrt_table[ebx*2]
283
+ mov ecx, edx
284
+ shr eax, cl
285
+ }
286
+ #else
287
+
288
+ //This code is actually pure C and portable to most
289
+ //architectures including 64-bit ones.
290
+ unsigned t = val;
291
+ int bit=0;
292
+ unsigned shift = 11;
293
+
294
+ //The following piece of code is just an emulation of the
295
+ //Ix86 assembler command "bsr" (see above). However on old
296
+ //Intels (like Intel MMX 233MHz) this code is about twice
297
+ //faster (sic!) than just one "bsr". On PIII and PIV the
298
+ //bsr is optimized quite well.
299
+ bit = t >> 24;
300
+ if(bit)
301
+ {
302
+ bit = g_elder_bit_table[bit] + 24;
303
+ }
304
+ else
305
+ {
306
+ bit = (t >> 16) & 0xFF;
307
+ if(bit)
308
+ {
309
+ bit = g_elder_bit_table[bit] + 16;
310
+ }
311
+ else
312
+ {
313
+ bit = (t >> 8) & 0xFF;
314
+ if(bit)
315
+ {
316
+ bit = g_elder_bit_table[bit] + 8;
317
+ }
318
+ else
319
+ {
320
+ bit = g_elder_bit_table[t];
321
+ }
322
+ }
323
+ }
324
+
325
+ //This code calculates the sqrt.
326
+ bit -= 9;
327
+ if(bit > 0)
328
+ {
329
+ bit = (bit >> 1) + (bit & 1);
330
+ shift -= bit;
331
+ val >>= (bit << 1);
332
+ }
333
+ return g_sqrt_table[val] >> shift;
334
+ #endif
335
+ }
336
+ #if defined(_MSC_VER)
337
+ #pragma warning(pop)
338
+ #endif
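A brief usage sketch (not part of the original header). Note that g_sqrt_table and g_elder_bit_table are only declared above; they are defined elsewhere in the AGG sources, so this only links against a full AGG build.

    inline unsigned int_length(int dx, int dy)
    {
        // Approximate integer length of a vector without calling the
        // floating-point sqrt().
        return agg::fast_sqrt(unsigned(dx * dx + dy * dy));
    }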
339
+
340
+
341
+
342
+
343
+ //--------------------------------------------------------------------besj
344
+ // Function BESJ calculates the Bessel function of the first kind of order n
345
+ // Arguments:
346
+ // n - an integer (>=0), the order
347
+ // x - value at which the Bessel function is required
348
+ //--------------------
349
+ // C++ Mathematical Library
350
+ // Converted from the equivalent FORTRAN library
351
+ // Converted by Gareth Walker for use by course 392 computational project
352
+ // All functions have been tested and yield the same results as the corresponding
353
+ // FORTRAN versions.
354
+ //
355
+ // If you have any problems using these functions please report them to
356
357
+ //
358
+ // Documentation available on the web
359
+ // http://www.ma.umist.ac.uk/mrm/Teaching/392/libs/392.html
360
+ // Version 1.0 8/98
361
+ // 29 October, 1999
362
+ //--------------------
363
+ // Adapted for use in AGG library by Andy Wilk ([email protected])
364
+ //------------------------------------------------------------------------
365
+ inline double besj(double x, int n)
366
+ {
367
+ if(n < 0)
368
+ {
369
+ return 0;
370
+ }
371
+ double d = 1E-6;
372
+ double b = 0;
373
+ if(fabs(x) <= d)
374
+ {
375
+ if(n != 0) return 0;
376
+ return 1;
377
+ }
378
+ double b1 = 0; // b1 is the value from the previous iteration
379
+ // Set up a starting order for recurrence
380
+ int m1 = (int)fabs(x) + 6;
381
+ if(fabs(x) > 5)
382
+ {
383
+ m1 = (int)(fabs(1.4 * x + 60 / x));
384
+ }
385
+ int m2 = (int)(n + 2 + fabs(x) / 4);
386
+ if (m1 > m2)
387
+ {
388
+ m2 = m1;
389
+ }
390
+
391
+ // Apply recurrence down from the current max order
392
+ for(;;)
393
+ {
394
+ double c3 = 0;
395
+ double c2 = 1E-30;
396
+ double c4 = 0;
397
+ int m8 = 1;
398
+ if (m2 / 2 * 2 == m2)
399
+ {
400
+ m8 = -1;
401
+ }
402
+ int imax = m2 - 2;
403
+ for (int i = 1; i <= imax; i++)
404
+ {
405
+ double c6 = 2 * (m2 - i) * c2 / x - c3;
406
+ c3 = c2;
407
+ c2 = c6;
408
+ if(m2 - i - 1 == n)
409
+ {
410
+ b = c6;
411
+ }
412
+ m8 = -1 * m8;
413
+ if (m8 > 0)
414
+ {
415
+ c4 = c4 + 2 * c6;
416
+ }
417
+ }
418
+ double c6 = 2 * c2 / x - c3;
419
+ if(n == 0)
420
+ {
421
+ b = c6;
422
+ }
423
+ c4 += c6;
424
+ b /= c4;
425
+ if(fabs(b - b1) < d)
426
+ {
427
+ return b;
428
+ }
429
+ b1 = b;
430
+ m2 += 3;
431
+ }
432
+ }
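For clarity (not part of the original header): note the argument order is besj(x, n), i.e. the evaluation point first and the order second.

    inline double bessel_j0(double x)
    {
        return agg::besj(x, 0);   // J_0(x); besj(0.0, 0) returns exactly 1.0
    }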
433
+
434
+ }
435
+
436
+
437
+ #endif
data/bundled_deps/agg/agg/agg_path_storage.h ADDED
@@ -0,0 +1,1582 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+
16
+ #ifndef AGG_PATH_STORAGE_INCLUDED
17
+ #define AGG_PATH_STORAGE_INCLUDED
18
+
19
+ #include <string.h>
20
+ #include <math.h>
21
+ #include "agg_math.h"
22
+ #include "agg_array.h"
23
+ #include "agg_bezier_arc.h"
24
+
25
+ namespace agg
26
+ {
27
+
28
+
29
+ //----------------------------------------------------vertex_block_storage
30
+ template<class T, unsigned BlockShift=8, unsigned BlockPool=256>
31
+ class vertex_block_storage
32
+ {
33
+ public:
34
+ // Allocation parameters
35
+ enum block_scale_e
36
+ {
37
+ block_shift = BlockShift,
38
+ block_size = 1 << block_shift,
39
+ block_mask = block_size - 1,
40
+ block_pool = BlockPool
41
+ };
42
+
43
+ typedef T value_type;
44
+ typedef vertex_block_storage<T, BlockShift, BlockPool> self_type;
45
+
46
+ ~vertex_block_storage();
47
+ vertex_block_storage();
48
+ vertex_block_storage(const self_type& v);
49
+ const self_type& operator = (const self_type& ps);
50
+
51
+ void remove_all();
52
+ void free_all();
53
+
54
+ void add_vertex(double x, double y, unsigned cmd);
55
+ void modify_vertex(unsigned idx, double x, double y);
56
+ void modify_vertex(unsigned idx, double x, double y, unsigned cmd);
57
+ void modify_command(unsigned idx, unsigned cmd);
58
+ void swap_vertices(unsigned v1, unsigned v2);
59
+
60
+ unsigned last_command() const;
61
+ unsigned last_vertex(double* x, double* y) const;
62
+ unsigned prev_vertex(double* x, double* y) const;
63
+
64
+ double last_x() const;
65
+ double last_y() const;
66
+
67
+ unsigned total_vertices() const;
68
+ unsigned vertex(unsigned idx, double* x, double* y) const;
69
+ unsigned command(unsigned idx) const;
70
+
71
+ private:
72
+ void allocate_block(unsigned nb);
73
+ int8u* storage_ptrs(T** xy_ptr);
74
+
75
+ private:
76
+ unsigned m_total_vertices;
77
+ unsigned m_total_blocks;
78
+ unsigned m_max_blocks;
79
+ T** m_coord_blocks;
80
+ int8u** m_cmd_blocks;
81
+ };
82
+
83
+
84
+ //------------------------------------------------------------------------
85
+ template<class T, unsigned S, unsigned P>
86
+ void vertex_block_storage<T,S,P>::free_all()
87
+ {
88
+ if(m_total_blocks)
89
+ {
90
+ T** coord_blk = m_coord_blocks + m_total_blocks - 1;
91
+ while(m_total_blocks--)
92
+ {
93
+ pod_allocator<T>::deallocate(
94
+ *coord_blk,
95
+ block_size * 2 +
96
+ block_size / (sizeof(T) / sizeof(unsigned char)));
97
+ --coord_blk;
98
+ }
99
+ pod_allocator<T*>::deallocate(m_coord_blocks, m_max_blocks * 2);
100
+ m_total_blocks = 0;
101
+ m_max_blocks = 0;
102
+ m_coord_blocks = 0;
103
+ m_cmd_blocks = 0;
104
+ m_total_vertices = 0;
105
+ }
106
+ }
107
+
108
+ //------------------------------------------------------------------------
109
+ template<class T, unsigned S, unsigned P>
110
+ vertex_block_storage<T,S,P>::~vertex_block_storage()
111
+ {
112
+ free_all();
113
+ }
114
+
115
+ //------------------------------------------------------------------------
116
+ template<class T, unsigned S, unsigned P>
117
+ vertex_block_storage<T,S,P>::vertex_block_storage() :
118
+ m_total_vertices(0),
119
+ m_total_blocks(0),
120
+ m_max_blocks(0),
121
+ m_coord_blocks(0),
122
+ m_cmd_blocks(0)
123
+ {
124
+ }
125
+
126
+ //------------------------------------------------------------------------
127
+ template<class T, unsigned S, unsigned P>
128
+ vertex_block_storage<T,S,P>::vertex_block_storage(const vertex_block_storage<T,S,P>& v) :
129
+ m_total_vertices(0),
130
+ m_total_blocks(0),
131
+ m_max_blocks(0),
132
+ m_coord_blocks(0),
133
+ m_cmd_blocks(0)
134
+ {
135
+ *this = v;
136
+ }
137
+
138
+ //------------------------------------------------------------------------
139
+ template<class T, unsigned S, unsigned P>
140
+ const vertex_block_storage<T,S,P>&
141
+ vertex_block_storage<T,S,P>::operator = (const vertex_block_storage<T,S,P>& v)
142
+ {
143
+ remove_all();
144
+ unsigned i;
145
+ for(i = 0; i < v.total_vertices(); i++)
146
+ {
147
+ double x, y;
148
+ unsigned cmd = v.vertex(i, &x, &y);
149
+ add_vertex(x, y, cmd);
150
+ }
151
+ return *this;
152
+ }
153
+
154
+ //------------------------------------------------------------------------
155
+ template<class T, unsigned S, unsigned P>
156
+ inline void vertex_block_storage<T,S,P>::remove_all()
157
+ {
158
+ m_total_vertices = 0;
159
+ }
160
+
161
+ //------------------------------------------------------------------------
162
+ template<class T, unsigned S, unsigned P>
163
+ inline void vertex_block_storage<T,S,P>::add_vertex(double x, double y,
164
+ unsigned cmd)
165
+ {
166
+ T* coord_ptr = 0;
167
+ *storage_ptrs(&coord_ptr) = (int8u)cmd;
168
+ coord_ptr[0] = T(x);
169
+ coord_ptr[1] = T(y);
170
+ m_total_vertices++;
171
+ }
172
+
173
+ //------------------------------------------------------------------------
174
+ template<class T, unsigned S, unsigned P>
175
+ inline void vertex_block_storage<T,S,P>::modify_vertex(unsigned idx,
176
+ double x, double y)
177
+ {
178
+ T* pv = m_coord_blocks[idx >> block_shift] + ((idx & block_mask) << 1);
179
+ pv[0] = T(x);
180
+ pv[1] = T(y);
181
+ }
182
+
183
+ //------------------------------------------------------------------------
184
+ template<class T, unsigned S, unsigned P>
185
+ inline void vertex_block_storage<T,S,P>::modify_vertex(unsigned idx,
186
+ double x, double y,
187
+ unsigned cmd)
188
+ {
189
+ unsigned block = idx >> block_shift;
190
+ unsigned offset = idx & block_mask;
191
+ T* pv = m_coord_blocks[block] + (offset << 1);
192
+ pv[0] = T(x);
193
+ pv[1] = T(y);
194
+ m_cmd_blocks[block][offset] = (int8u)cmd;
195
+ }
196
+
197
+ //------------------------------------------------------------------------
198
+ template<class T, unsigned S, unsigned P>
199
+ inline void vertex_block_storage<T,S,P>::modify_command(unsigned idx,
200
+ unsigned cmd)
201
+ {
202
+ m_cmd_blocks[idx >> block_shift][idx & block_mask] = (int8u)cmd;
203
+ }
204
+
205
+ //------------------------------------------------------------------------
206
+ template<class T, unsigned S, unsigned P>
207
+ inline void vertex_block_storage<T,S,P>::swap_vertices(unsigned v1, unsigned v2)
208
+ {
209
+ unsigned b1 = v1 >> block_shift;
210
+ unsigned b2 = v2 >> block_shift;
211
+ unsigned o1 = v1 & block_mask;
212
+ unsigned o2 = v2 & block_mask;
213
+ T* pv1 = m_coord_blocks[b1] + (o1 << 1);
214
+ T* pv2 = m_coord_blocks[b2] + (o2 << 1);
215
+ T val;
216
+ val = pv1[0]; pv1[0] = pv2[0]; pv2[0] = val;
217
+ val = pv1[1]; pv1[1] = pv2[1]; pv2[1] = val;
218
+ int8u cmd = m_cmd_blocks[b1][o1];
219
+ m_cmd_blocks[b1][o1] = m_cmd_blocks[b2][o2];
220
+ m_cmd_blocks[b2][o2] = cmd;
221
+ }
222
+
223
+ //------------------------------------------------------------------------
224
+ template<class T, unsigned S, unsigned P>
225
+ inline unsigned vertex_block_storage<T,S,P>::last_command() const
226
+ {
227
+ if(m_total_vertices) return command(m_total_vertices - 1);
228
+ return path_cmd_stop;
229
+ }
230
+
231
+ //------------------------------------------------------------------------
232
+ template<class T, unsigned S, unsigned P>
233
+ inline unsigned vertex_block_storage<T,S,P>::last_vertex(double* x, double* y) const
234
+ {
235
+ if(m_total_vertices) return vertex(m_total_vertices - 1, x, y);
236
+ return path_cmd_stop;
237
+ }
238
+
239
+ //------------------------------------------------------------------------
240
+ template<class T, unsigned S, unsigned P>
241
+ inline unsigned vertex_block_storage<T,S,P>::prev_vertex(double* x, double* y) const
242
+ {
243
+ if(m_total_vertices > 1) return vertex(m_total_vertices - 2, x, y);
244
+ return path_cmd_stop;
245
+ }
246
+
247
+ //------------------------------------------------------------------------
248
+ template<class T, unsigned S, unsigned P>
249
+ inline double vertex_block_storage<T,S,P>::last_x() const
250
+ {
251
+ if(m_total_vertices)
252
+ {
253
+ unsigned idx = m_total_vertices - 1;
254
+ return m_coord_blocks[idx >> block_shift][(idx & block_mask) << 1];
255
+ }
256
+ return 0.0;
257
+ }
258
+
259
+ //------------------------------------------------------------------------
260
+ template<class T, unsigned S, unsigned P>
261
+ inline double vertex_block_storage<T,S,P>::last_y() const
262
+ {
263
+ if(m_total_vertices)
264
+ {
265
+ unsigned idx = m_total_vertices - 1;
266
+ return m_coord_blocks[idx >> block_shift][((idx & block_mask) << 1) + 1];
267
+ }
268
+ return 0.0;
269
+ }
270
+
271
+ //------------------------------------------------------------------------
272
+ template<class T, unsigned S, unsigned P>
273
+ inline unsigned vertex_block_storage<T,S,P>::total_vertices() const
274
+ {
275
+ return m_total_vertices;
276
+ }
277
+
278
+ //------------------------------------------------------------------------
279
+ template<class T, unsigned S, unsigned P>
280
+ inline unsigned vertex_block_storage<T,S,P>::vertex(unsigned idx,
281
+ double* x, double* y) const
282
+ {
283
+ unsigned nb = idx >> block_shift;
284
+ const T* pv = m_coord_blocks[nb] + ((idx & block_mask) << 1);
285
+ *x = pv[0];
286
+ *y = pv[1];
287
+ return m_cmd_blocks[nb][idx & block_mask];
288
+ }
289
+
290
+ //------------------------------------------------------------------------
291
+ template<class T, unsigned S, unsigned P>
292
+ inline unsigned vertex_block_storage<T,S,P>::command(unsigned idx) const
293
+ {
294
+ return m_cmd_blocks[idx >> block_shift][idx & block_mask];
295
+ }
296
+
297
+ //------------------------------------------------------------------------
298
+ template<class T, unsigned S, unsigned P>
299
+ void vertex_block_storage<T,S,P>::allocate_block(unsigned nb)
300
+ {
301
+ if(nb >= m_max_blocks)
302
+ {
303
+ T** new_coords =
304
+ pod_allocator<T*>::allocate((m_max_blocks + block_pool) * 2);
305
+
306
+ unsigned char** new_cmds =
307
+ (unsigned char**)(new_coords + m_max_blocks + block_pool);
308
+
309
+ if(m_coord_blocks)
310
+ {
311
+ memcpy(new_coords,
312
+ m_coord_blocks,
313
+ m_max_blocks * sizeof(T*));
314
+
315
+ memcpy(new_cmds,
316
+ m_cmd_blocks,
317
+ m_max_blocks * sizeof(unsigned char*));
318
+
319
+ pod_allocator<T*>::deallocate(m_coord_blocks, m_max_blocks * 2);
320
+ }
321
+ m_coord_blocks = new_coords;
322
+ m_cmd_blocks = new_cmds;
323
+ m_max_blocks += block_pool;
324
+ }
325
+ m_coord_blocks[nb] =
326
+ pod_allocator<T>::allocate(block_size * 2 +
327
+ block_size / (sizeof(T) / sizeof(unsigned char)));
328
+
329
+ m_cmd_blocks[nb] =
330
+ (unsigned char*)(m_coord_blocks[nb] + block_size * 2);
331
+
332
+ m_total_blocks++;
333
+ }
334
+
335
+ //------------------------------------------------------------------------
336
+ template<class T, unsigned S, unsigned P>
337
+ int8u* vertex_block_storage<T,S,P>::storage_ptrs(T** xy_ptr)
338
+ {
339
+ unsigned nb = m_total_vertices >> block_shift;
340
+ if(nb >= m_total_blocks)
341
+ {
342
+ allocate_block(nb);
343
+ }
344
+ *xy_ptr = m_coord_blocks[nb] + ((m_total_vertices & block_mask) << 1);
345
+ return m_cmd_blocks[nb] + (m_total_vertices & block_mask);
346
+ }
347
+
348
+
349
+
350
+
351
+ //-----------------------------------------------------poly_plain_adaptor
352
+ template<class T> class poly_plain_adaptor
353
+ {
354
+ public:
355
+ typedef T value_type;
356
+
357
+ poly_plain_adaptor() :
358
+ m_data(0),
359
+ m_ptr(0),
360
+ m_end(0),
361
+ m_closed(false),
362
+ m_stop(false)
363
+ {}
364
+
365
+ poly_plain_adaptor(const T* data, unsigned num_points, bool closed) :
366
+ m_data(data),
367
+ m_ptr(data),
368
+ m_end(data + num_points * 2),
369
+ m_closed(closed),
370
+ m_stop(false)
371
+ {}
372
+
373
+ void init(const T* data, unsigned num_points, bool closed)
374
+ {
375
+ m_data = data;
376
+ m_ptr = data;
377
+ m_end = data + num_points * 2;
378
+ m_closed = closed;
379
+ m_stop = false;
380
+ }
381
+
382
+ void rewind(unsigned)
383
+ {
384
+ m_ptr = m_data;
385
+ m_stop = false;
386
+ }
387
+
388
+ unsigned vertex(double* x, double* y)
389
+ {
390
+ if(m_ptr < m_end)
391
+ {
392
+ bool first = m_ptr == m_data;
393
+ *x = *m_ptr++;
394
+ *y = *m_ptr++;
395
+ return first ? path_cmd_move_to : path_cmd_line_to;
396
+ }
397
+ *x = *y = 0.0;
398
+ if(m_closed && !m_stop)
399
+ {
400
+ m_stop = true;
401
+ return path_cmd_end_poly | path_flags_close;
402
+ }
403
+ return path_cmd_stop;
404
+ }
405
+
406
+ private:
407
+ const T* m_data;
408
+ const T* m_ptr;
409
+ const T* m_end;
410
+ bool m_closed;
411
+ bool m_stop;
412
+ };
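A short usage sketch (not part of the original header): the adaptor exposes a flat x0,y0,x1,y1,... array through the vertex_source interface.

    inline unsigned count_triangle_commands()
    {
        const double tri[6] = { 0.0, 0.0,  10.0, 0.0,  5.0, 8.0 };
        agg::poly_plain_adaptor<double> vs(tri, 3, true);   // 3 points, closed
        double x, y;
        unsigned n = 0;
        while(!agg::is_stop(vs.vertex(&x, &y))) ++n;
        return n;   // move_to + 2 line_to + end_poly|close = 4
    }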
413
+
414
+
415
+
416
+
417
+
418
+ //-------------------------------------------------poly_container_adaptor
419
+ template<class Container> class poly_container_adaptor
420
+ {
421
+ public:
422
+ typedef typename Container::value_type vertex_type;
423
+
424
+ poly_container_adaptor() :
425
+ m_container(0),
426
+ m_index(0),
427
+ m_closed(false),
428
+ m_stop(false)
429
+ {}
430
+
431
+ poly_container_adaptor(const Container& data, bool closed) :
432
+ m_container(&data),
433
+ m_index(0),
434
+ m_closed(closed),
435
+ m_stop(false)
436
+ {}
437
+
438
+ void init(const Container& data, bool closed)
439
+ {
440
+ m_container = &data;
441
+ m_index = 0;
442
+ m_closed = closed;
443
+ m_stop = false;
444
+ }
445
+
446
+ void rewind(unsigned)
447
+ {
448
+ m_index = 0;
449
+ m_stop = false;
450
+ }
451
+
452
+ unsigned vertex(double* x, double* y)
453
+ {
454
+ if(m_index < m_container->size())
455
+ {
456
+ bool first = m_index == 0;
457
+ const vertex_type& v = (*m_container)[m_index++];
458
+ *x = v.x;
459
+ *y = v.y;
460
+ return first ? path_cmd_move_to : path_cmd_line_to;
461
+ }
462
+ *x = *y = 0.0;
463
+ if(m_closed && !m_stop)
464
+ {
465
+ m_stop = true;
466
+ return path_cmd_end_poly | path_flags_close;
467
+ }
468
+ return path_cmd_stop;
469
+ }
470
+
471
+ private:
472
+ const Container* m_container;
473
+ unsigned m_index;
474
+ bool m_closed;
475
+ bool m_stop;
476
+ };
477
+
478
+
479
+
480
+ //-----------------------------------------poly_container_reverse_adaptor
481
+ template<class Container> class poly_container_reverse_adaptor
482
+ {
483
+ public:
484
+ typedef typename Container::value_type vertex_type;
485
+
486
+ poly_container_reverse_adaptor() :
487
+ m_container(0),
488
+ m_index(-1),
489
+ m_closed(false),
490
+ m_stop(false)
491
+ {}
492
+
493
+ poly_container_reverse_adaptor(Container& data, bool closed) :
494
+ m_container(&data),
495
+ m_index(-1),
496
+ m_closed(closed),
497
+ m_stop(false)
498
+ {}
499
+
500
+ void init(Container& data, bool closed)
501
+ {
502
+ m_container = &data;
503
+ m_index = m_container->size() - 1;
504
+ m_closed = closed;
505
+ m_stop = false;
506
+ }
507
+
508
+ void rewind(unsigned)
509
+ {
510
+ m_index = m_container->size() - 1;
511
+ m_stop = false;
512
+ }
513
+
514
+ unsigned vertex(double* x, double* y)
515
+ {
516
+ if(m_index >= 0)
517
+ {
518
+ bool first = m_index == int(m_container->size() - 1);
519
+ const vertex_type& v = (*m_container)[m_index--];
520
+ *x = v.x;
521
+ *y = v.y;
522
+ return first ? path_cmd_move_to : path_cmd_line_to;
523
+ }
524
+ *x = *y = 0.0;
525
+ if(m_closed && !m_stop)
526
+ {
527
+ m_stop = true;
528
+ return path_cmd_end_poly | path_flags_close;
529
+ }
530
+ return path_cmd_stop;
531
+ }
532
+
533
+ private:
534
+ Container* m_container;
535
+ int m_index;
536
+ bool m_closed;
537
+ bool m_stop;
538
+ };
539
+
540
+
541
+
542
+
543
+
544
+ //--------------------------------------------------------line_adaptor
545
+ class line_adaptor
546
+ {
547
+ public:
548
+ typedef double value_type;
549
+
550
+ line_adaptor() : m_line(m_coord, 2, false) {}
551
+ line_adaptor(double x1, double y1, double x2, double y2) :
552
+ m_line(m_coord, 2, false)
553
+ {
554
+ m_coord[0] = x1;
555
+ m_coord[1] = y1;
556
+ m_coord[2] = x2;
557
+ m_coord[3] = y2;
558
+ }
559
+
560
+ void init(double x1, double y1, double x2, double y2)
561
+ {
562
+ m_coord[0] = x1;
563
+ m_coord[1] = y1;
564
+ m_coord[2] = x2;
565
+ m_coord[3] = y2;
566
+ m_line.rewind(0);
567
+ }
568
+
569
+ void rewind(unsigned)
570
+ {
571
+ m_line.rewind(0);
572
+ }
573
+
574
+ unsigned vertex(double* x, double* y)
575
+ {
576
+ return m_line.vertex(x, y);
577
+ }
578
+
579
+ private:
580
+ double m_coord[4];
581
+ poly_plain_adaptor<double> m_line;
582
+ };
583
+
584
+
585
+
586
+
587
+
588
+
589
+
590
+
591
+
592
+
593
+
594
+
595
+
596
+ //---------------------------------------------------------------path_base
597
+ // A container to store vertices with their flags.
598
+ // A path consists of a number of contours separated with "move_to"
599
+ // commands. The path storage can keep and maintain more than one
600
+ // path.
601
+ // To navigate to the beginning of a particular path, use rewind(path_id);
602
+ // Where path_id is what start_new_path() returns. So, when you call
603
+ // start_new_path() you need to store its return value somewhere else
604
+ // to navigate to the path afterwards.
605
+ //
606
+ // See also: vertex_source concept
607
+ //------------------------------------------------------------------------
608
+ template<class VertexContainer> class path_base
609
+ {
610
+ public:
611
+ typedef VertexContainer container_type;
612
+ typedef path_base<VertexContainer> self_type;
613
+
614
+ //--------------------------------------------------------------------
615
+ path_base() : m_vertices(), m_iterator(0) {}
616
+ void remove_all() { m_vertices.remove_all(); m_iterator = 0; }
617
+ void free_all() { m_vertices.free_all(); m_iterator = 0; }
618
+
619
+ // Make path functions
620
+ //--------------------------------------------------------------------
621
+ unsigned start_new_path();
622
+
623
+ void move_to(double x, double y);
624
+ void move_rel(double dx, double dy);
625
+
626
+ void line_to(double x, double y);
627
+ void line_rel(double dx, double dy);
628
+
629
+ void hline_to(double x);
630
+ void hline_rel(double dx);
631
+
632
+ void vline_to(double y);
633
+ void vline_rel(double dy);
634
+
635
+ void arc_to(double rx, double ry,
636
+ double angle,
637
+ bool large_arc_flag,
638
+ bool sweep_flag,
639
+ double x, double y);
640
+
641
+ void arc_rel(double rx, double ry,
642
+ double angle,
643
+ bool large_arc_flag,
644
+ bool sweep_flag,
645
+ double dx, double dy);
646
+
647
+ void curve3(double x_ctrl, double y_ctrl,
648
+ double x_to, double y_to);
649
+
650
+ void curve3_rel(double dx_ctrl, double dy_ctrl,
651
+ double dx_to, double dy_to);
652
+
653
+ void curve3(double x_to, double y_to);
654
+
655
+ void curve3_rel(double dx_to, double dy_to);
656
+
657
+ void curve4(double x_ctrl1, double y_ctrl1,
658
+ double x_ctrl2, double y_ctrl2,
659
+ double x_to, double y_to);
660
+
661
+ void curve4_rel(double dx_ctrl1, double dy_ctrl1,
662
+ double dx_ctrl2, double dy_ctrl2,
663
+ double dx_to, double dy_to);
664
+
665
+ void curve4(double x_ctrl2, double y_ctrl2,
666
+ double x_to, double y_to);
667
+
668
+ void curve4_rel(double x_ctrl2, double y_ctrl2,
669
+ double x_to, double y_to);
670
+
671
+
672
+ void end_poly(unsigned flags = path_flags_close);
673
+ void close_polygon(unsigned flags = path_flags_none);
674
+
675
+ // Accessors
676
+ //--------------------------------------------------------------------
677
+ const container_type& vertices() const { return m_vertices; }
678
+ container_type& vertices() { return m_vertices; }
679
+
680
+ unsigned total_vertices() const;
681
+
682
+ void rel_to_abs(double* x, double* y) const;
683
+
684
+ unsigned last_vertex(double* x, double* y) const;
685
+ unsigned prev_vertex(double* x, double* y) const;
686
+
687
+ double last_x() const;
688
+ double last_y() const;
689
+
690
+ unsigned vertex(unsigned idx, double* x, double* y) const;
691
+ unsigned command(unsigned idx) const;
692
+
693
+ void modify_vertex(unsigned idx, double x, double y);
694
+ void modify_vertex(unsigned idx, double x, double y, unsigned cmd);
695
+ void modify_command(unsigned idx, unsigned cmd);
696
+
697
+ // VertexSource interface
698
+ //--------------------------------------------------------------------
699
+ void rewind(unsigned path_id);
700
+ unsigned vertex(double* x, double* y);
701
+
702
+ // Arrange the orientation of a polygon, all polygons in a path,
703
+ // or in all paths. After calling arrange_orientations() or
704
+ // arrange_orientations_all_paths(), all the polygons will have
705
+ // the same orientation, i.e. path_flags_cw or path_flags_ccw
706
+ //--------------------------------------------------------------------
707
+ unsigned arrange_polygon_orientation(unsigned start, path_flags_e orientation);
708
+ unsigned arrange_orientations(unsigned path_id, path_flags_e orientation);
709
+ void arrange_orientations_all_paths(path_flags_e orientation);
710
+ void invert_polygon(unsigned start);
711
+
712
+ // Flip all vertices horizontally or vertically,
713
+ // between x1 and x2, or between y1 and y2 respectively
714
+ //--------------------------------------------------------------------
715
+ void flip_x(double x1, double x2);
716
+ void flip_y(double y1, double y2);
717
+
718
+ // Concatenate path. The path is added as is.
719
+ //--------------------------------------------------------------------
720
+ template<class VertexSource>
721
+ void concat_path(VertexSource& vs, unsigned path_id = 0)
722
+ {
723
+ double x, y;
724
+ unsigned cmd;
725
+ vs.rewind(path_id);
726
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
727
+ {
728
+ m_vertices.add_vertex(x, y, cmd);
729
+ }
730
+ }
731
+
732
+ //--------------------------------------------------------------------
733
+ // Join path. The path is joined with the existing one, that is,
734
+ // it behaves as if the pen of a plotter was always down (drawing)
735
+ template<class VertexSource>
736
+ void join_path(VertexSource& vs, unsigned path_id = 0)
737
+ {
738
+ double x, y;
739
+ unsigned cmd;
740
+ vs.rewind(path_id);
741
+ cmd = vs.vertex(&x, &y);
742
+ if(!is_stop(cmd))
743
+ {
744
+ if(is_vertex(cmd))
745
+ {
746
+ double x0, y0;
747
+ unsigned cmd0 = last_vertex(&x0, &y0);
748
+ if(is_vertex(cmd0))
749
+ {
750
+ if(calc_distance(x, y, x0, y0) > vertex_dist_epsilon)
751
+ {
752
+ if(is_move_to(cmd)) cmd = path_cmd_line_to;
753
+ m_vertices.add_vertex(x, y, cmd);
754
+ }
755
+ }
756
+ else
757
+ {
758
+ if(is_stop(cmd0))
759
+ {
760
+ cmd = path_cmd_move_to;
761
+ }
762
+ else
763
+ {
764
+ if(is_move_to(cmd)) cmd = path_cmd_line_to;
765
+ }
766
+ m_vertices.add_vertex(x, y, cmd);
767
+ }
768
+ }
769
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
770
+ {
771
+ m_vertices.add_vertex(x, y, is_move_to(cmd) ?
772
+ unsigned(path_cmd_line_to) :
773
+ cmd);
774
+ }
775
+ }
776
+ }
777
+
778
+ // Concatenate polygon/polyline.
779
+ //--------------------------------------------------------------------
780
+ template<class T> void concat_poly(const T* data,
781
+ unsigned num_points,
782
+ bool closed)
783
+ {
784
+ poly_plain_adaptor<T> poly(data, num_points, closed);
785
+ concat_path(poly);
786
+ }
787
+
788
+ // Join polygon/polyline continuously.
789
+ //--------------------------------------------------------------------
790
+ template<class T> void join_poly(const T* data,
791
+ unsigned num_points,
792
+ bool closed)
793
+ {
794
+ poly_plain_adaptor<T> poly(data, num_points, closed);
795
+ join_path(poly);
796
+ }
797
+
798
+ //--------------------------------------------------------------------
799
+ void translate(double dx, double dy, unsigned path_id=0);
800
+ void translate_all_paths(double dx, double dy);
801
+
802
+ //--------------------------------------------------------------------
803
+ template<class Trans>
804
+ void transform(const Trans& trans, unsigned path_id=0)
805
+ {
806
+ unsigned num_ver = m_vertices.total_vertices();
807
+ for(; path_id < num_ver; path_id++)
808
+ {
809
+ double x, y;
810
+ unsigned cmd = m_vertices.vertex(path_id, &x, &y);
811
+ if(is_stop(cmd)) break;
812
+ if(is_vertex(cmd))
813
+ {
814
+ trans.transform(&x, &y);
815
+ m_vertices.modify_vertex(path_id, x, y);
816
+ }
817
+ }
818
+ }
819
+
820
+ //--------------------------------------------------------------------
821
+ template<class Trans>
822
+ void transform_all_paths(const Trans& trans)
823
+ {
824
+ unsigned idx;
825
+ unsigned num_ver = m_vertices.total_vertices();
826
+ for(idx = 0; idx < num_ver; idx++)
827
+ {
828
+ double x, y;
829
+ if(is_vertex(m_vertices.vertex(idx, &x, &y)))
830
+ {
831
+ trans.transform(&x, &y);
832
+ m_vertices.modify_vertex(idx, x, y);
833
+ }
834
+ }
835
+ }
836
+
837
+
838
+ //--------------------------------------------------------------------
839
+ // If the end points of a path are very, very close then make them
840
+ // exactly equal so that the stroke converter is not confused.
841
+ //--------------------------------------------------------------------
842
+ unsigned align_path(unsigned idx = 0)
843
+ {
844
+ if (idx >= total_vertices() || !is_move_to(command(idx)))
845
+ {
846
+ return total_vertices();
847
+ }
848
+
849
+ double start_x, start_y;
850
+ for (; idx < total_vertices() && is_move_to(command(idx)); ++idx)
851
+ {
852
+ vertex(idx, &start_x, &start_y);
853
+ }
854
+ while (idx < total_vertices() && is_drawing(command(idx)))
855
+ ++idx;
856
+
857
+ double x, y;
858
+ if (is_drawing(vertex(idx - 1, &x, &y)) &&
859
+ is_equal_eps(x, start_x, 1e-8) &&
860
+ is_equal_eps(y, start_y, 1e-8))
861
+ {
862
+ modify_vertex(idx - 1, start_x, start_y);
863
+ }
864
+
865
+ while (idx < total_vertices() && !is_move_to(command(idx)))
866
+ ++idx;
867
+ return idx;
868
+ }
869
+
870
+ void align_all_paths()
871
+ {
872
+ for (unsigned i = 0; i < total_vertices(); i = align_path(i));
873
+ }
874
+
875
+
876
+ private:
877
+ unsigned perceive_polygon_orientation(unsigned start, unsigned end);
878
+ void invert_polygon(unsigned start, unsigned end);
879
+
880
+ VertexContainer m_vertices;
881
+ unsigned m_iterator;
882
+ };
883
+
884
+ //------------------------------------------------------------------------
885
+ template<class VC>
886
+ unsigned path_base<VC>::start_new_path()
887
+ {
888
+ if(!is_stop(m_vertices.last_command()))
889
+ {
890
+ m_vertices.add_vertex(0.0, 0.0, path_cmd_stop);
891
+ }
892
+ return m_vertices.total_vertices();
893
+ }
894
+
895
+
896
+ //------------------------------------------------------------------------
897
+ template<class VC>
898
+ inline void path_base<VC>::rel_to_abs(double* x, double* y) const
899
+ {
900
+ if(m_vertices.total_vertices())
901
+ {
902
+ double x2;
903
+ double y2;
904
+ if(is_vertex(m_vertices.last_vertex(&x2, &y2)))
905
+ {
906
+ *x += x2;
907
+ *y += y2;
908
+ }
909
+ }
910
+ }
911
+
912
+ //------------------------------------------------------------------------
913
+ template<class VC>
914
+ inline void path_base<VC>::move_to(double x, double y)
915
+ {
916
+ m_vertices.add_vertex(x, y, path_cmd_move_to);
917
+ }
918
+
919
+ //------------------------------------------------------------------------
920
+ template<class VC>
921
+ inline void path_base<VC>::move_rel(double dx, double dy)
922
+ {
923
+ rel_to_abs(&dx, &dy);
924
+ m_vertices.add_vertex(dx, dy, path_cmd_move_to);
925
+ }
926
+
927
+ //------------------------------------------------------------------------
928
+ template<class VC>
929
+ inline void path_base<VC>::line_to(double x, double y)
930
+ {
931
+ m_vertices.add_vertex(x, y, path_cmd_line_to);
932
+ }
933
+
934
+ //------------------------------------------------------------------------
935
+ template<class VC>
936
+ inline void path_base<VC>::line_rel(double dx, double dy)
937
+ {
938
+ rel_to_abs(&dx, &dy);
939
+ m_vertices.add_vertex(dx, dy, path_cmd_line_to);
940
+ }
941
+
942
+ //------------------------------------------------------------------------
943
+ template<class VC>
944
+ inline void path_base<VC>::hline_to(double x)
945
+ {
946
+ m_vertices.add_vertex(x, last_y(), path_cmd_line_to);
947
+ }
948
+
949
+ //------------------------------------------------------------------------
950
+ template<class VC>
951
+ inline void path_base<VC>::hline_rel(double dx)
952
+ {
953
+ double dy = 0;
954
+ rel_to_abs(&dx, &dy);
955
+ m_vertices.add_vertex(dx, dy, path_cmd_line_to);
956
+ }
957
+
958
+ //------------------------------------------------------------------------
959
+ template<class VC>
960
+ inline void path_base<VC>::vline_to(double y)
961
+ {
962
+ m_vertices.add_vertex(last_x(), y, path_cmd_line_to);
963
+ }
964
+
965
+ //------------------------------------------------------------------------
966
+ template<class VC>
967
+ inline void path_base<VC>::vline_rel(double dy)
968
+ {
969
+ double dx = 0;
970
+ rel_to_abs(&dx, &dy);
971
+ m_vertices.add_vertex(dx, dy, path_cmd_line_to);
972
+ }
973
+
974
+ //------------------------------------------------------------------------
975
+ template<class VC>
976
+ void path_base<VC>::arc_to(double rx, double ry,
977
+ double angle,
978
+ bool large_arc_flag,
979
+ bool sweep_flag,
980
+ double x, double y)
981
+ {
982
+ if(m_vertices.total_vertices() && is_vertex(m_vertices.last_command()))
983
+ {
984
+ const double epsilon = 1e-30;
985
+ double x0 = 0.0;
986
+ double y0 = 0.0;
987
+ m_vertices.last_vertex(&x0, &y0);
988
+
989
+ rx = fabs(rx);
990
+ ry = fabs(ry);
991
+
992
+ // Ensure radii are valid
993
+ //-------------------------
994
+ if(rx < epsilon || ry < epsilon)
995
+ {
996
+ line_to(x, y);
997
+ return;
998
+ }
999
+
1000
+ if(calc_distance(x0, y0, x, y) < epsilon)
1001
+ {
1002
+ // If the endpoints (x, y) and (x0, y0) are identical, then this
1003
+ // is equivalent to omitting the elliptical arc segment entirely.
1004
+ return;
1005
+ }
1006
+ bezier_arc_svg a(x0, y0, rx, ry, angle, large_arc_flag, sweep_flag, x, y);
1007
+ if(a.radii_ok())
1008
+ {
1009
+ join_path(a);
1010
+ }
1011
+ else
1012
+ {
1013
+ line_to(x, y);
1014
+ }
1015
+ }
1016
+ else
1017
+ {
1018
+ move_to(x, y);
1019
+ }
1020
+ }
1021
+
1022
+ //------------------------------------------------------------------------
1023
+ template<class VC>
1024
+ void path_base<VC>::arc_rel(double rx, double ry,
1025
+ double angle,
1026
+ bool large_arc_flag,
1027
+ bool sweep_flag,
1028
+ double dx, double dy)
1029
+ {
1030
+ rel_to_abs(&dx, &dy);
1031
+ arc_to(rx, ry, angle, large_arc_flag, sweep_flag, dx, dy);
1032
+ }
1033
+
1034
+ //------------------------------------------------------------------------
1035
+ template<class VC>
1036
+ void path_base<VC>::curve3(double x_ctrl, double y_ctrl,
1037
+ double x_to, double y_to)
1038
+ {
1039
+ m_vertices.add_vertex(x_ctrl, y_ctrl, path_cmd_curve3);
1040
+ m_vertices.add_vertex(x_to, y_to, path_cmd_curve3);
1041
+ }
1042
+
1043
+ //------------------------------------------------------------------------
1044
+ template<class VC>
1045
+ void path_base<VC>::curve3_rel(double dx_ctrl, double dy_ctrl,
1046
+ double dx_to, double dy_to)
1047
+ {
1048
+ rel_to_abs(&dx_ctrl, &dy_ctrl);
1049
+ rel_to_abs(&dx_to, &dy_to);
1050
+ m_vertices.add_vertex(dx_ctrl, dy_ctrl, path_cmd_curve3);
1051
+ m_vertices.add_vertex(dx_to, dy_to, path_cmd_curve3);
1052
+ }
1053
+
1054
+ //------------------------------------------------------------------------
1055
+ template<class VC>
1056
+ void path_base<VC>::curve3(double x_to, double y_to)
1057
+ {
1058
+ double x0;
1059
+ double y0;
1060
+ if(is_vertex(m_vertices.last_vertex(&x0, &y0)))
1061
+ {
1062
+ double x_ctrl;
1063
+ double y_ctrl;
1064
+ unsigned cmd = m_vertices.prev_vertex(&x_ctrl, &y_ctrl);
1065
+ if(is_curve(cmd))
1066
+ {
1067
+ x_ctrl = x0 + x0 - x_ctrl;
1068
+ y_ctrl = y0 + y0 - y_ctrl;
1069
+ }
1070
+ else
1071
+ {
1072
+ x_ctrl = x0;
1073
+ y_ctrl = y0;
1074
+ }
1075
+ curve3(x_ctrl, y_ctrl, x_to, y_to);
1076
+ }
1077
+ }
1078
+
1079
+ //------------------------------------------------------------------------
1080
+ template<class VC>
1081
+ void path_base<VC>::curve3_rel(double dx_to, double dy_to)
1082
+ {
1083
+ rel_to_abs(&dx_to, &dy_to);
1084
+ curve3(dx_to, dy_to);
1085
+ }
1086
+
1087
+ //------------------------------------------------------------------------
1088
+ template<class VC>
1089
+ void path_base<VC>::curve4(double x_ctrl1, double y_ctrl1,
1090
+ double x_ctrl2, double y_ctrl2,
1091
+ double x_to, double y_to)
1092
+ {
1093
+ m_vertices.add_vertex(x_ctrl1, y_ctrl1, path_cmd_curve4);
1094
+ m_vertices.add_vertex(x_ctrl2, y_ctrl2, path_cmd_curve4);
1095
+ m_vertices.add_vertex(x_to, y_to, path_cmd_curve4);
1096
+ }
1097
+
1098
+ //------------------------------------------------------------------------
1099
+ template<class VC>
1100
+ void path_base<VC>::curve4_rel(double dx_ctrl1, double dy_ctrl1,
1101
+ double dx_ctrl2, double dy_ctrl2,
1102
+ double dx_to, double dy_to)
1103
+ {
1104
+ rel_to_abs(&dx_ctrl1, &dy_ctrl1);
1105
+ rel_to_abs(&dx_ctrl2, &dy_ctrl2);
1106
+ rel_to_abs(&dx_to, &dy_to);
1107
+ m_vertices.add_vertex(dx_ctrl1, dy_ctrl1, path_cmd_curve4);
1108
+ m_vertices.add_vertex(dx_ctrl2, dy_ctrl2, path_cmd_curve4);
1109
+ m_vertices.add_vertex(dx_to, dy_to, path_cmd_curve4);
1110
+ }
1111
+
1112
+ //------------------------------------------------------------------------
1113
+ template<class VC>
1114
+ void path_base<VC>::curve4(double x_ctrl2, double y_ctrl2,
1115
+ double x_to, double y_to)
1116
+ {
1117
+ double x0;
1118
+ double y0;
1119
+ if(is_vertex(last_vertex(&x0, &y0)))
1120
+ {
1121
+ double x_ctrl1;
1122
+ double y_ctrl1;
1123
+ unsigned cmd = prev_vertex(&x_ctrl1, &y_ctrl1);
1124
+ if(is_curve(cmd))
1125
+ {
1126
+ x_ctrl1 = x0 + x0 - x_ctrl1;
1127
+ y_ctrl1 = y0 + y0 - y_ctrl1;
1128
+ }
1129
+ else
1130
+ {
1131
+ x_ctrl1 = x0;
1132
+ y_ctrl1 = y0;
1133
+ }
1134
+ curve4(x_ctrl1, y_ctrl1, x_ctrl2, y_ctrl2, x_to, y_to);
1135
+ }
1136
+ }
1137
+
1138
+ //------------------------------------------------------------------------
1139
+ template<class VC>
1140
+ void path_base<VC>::curve4_rel(double dx_ctrl2, double dy_ctrl2,
1141
+ double dx_to, double dy_to)
1142
+ {
1143
+ rel_to_abs(&dx_ctrl2, &dy_ctrl2);
1144
+ rel_to_abs(&dx_to, &dy_to);
1145
+ curve4(dx_ctrl2, dy_ctrl2, dx_to, dy_to);
1146
+ }
1147
+
1148
+ //------------------------------------------------------------------------
1149
+ template<class VC>
1150
+ inline void path_base<VC>::end_poly(unsigned flags)
1151
+ {
1152
+ if(is_vertex(m_vertices.last_command()))
1153
+ {
1154
+ m_vertices.add_vertex(0.0, 0.0, path_cmd_end_poly | flags);
1155
+ }
1156
+ }
1157
+
1158
+ //------------------------------------------------------------------------
1159
+ template<class VC>
1160
+ inline void path_base<VC>::close_polygon(unsigned flags)
1161
+ {
1162
+ end_poly(path_flags_close | flags);
1163
+ }
1164
+
1165
+ //------------------------------------------------------------------------
1166
+ template<class VC>
1167
+ inline unsigned path_base<VC>::total_vertices() const
1168
+ {
1169
+ return m_vertices.total_vertices();
1170
+ }
1171
+
1172
+ //------------------------------------------------------------------------
1173
+ template<class VC>
1174
+ inline unsigned path_base<VC>::last_vertex(double* x, double* y) const
1175
+ {
1176
+ return m_vertices.last_vertex(x, y);
1177
+ }
1178
+
1179
+ //------------------------------------------------------------------------
1180
+ template<class VC>
1181
+ inline unsigned path_base<VC>::prev_vertex(double* x, double* y) const
1182
+ {
1183
+ return m_vertices.prev_vertex(x, y);
1184
+ }
1185
+
1186
+ //------------------------------------------------------------------------
1187
+ template<class VC>
1188
+ inline double path_base<VC>::last_x() const
1189
+ {
1190
+ return m_vertices.last_x();
1191
+ }
1192
+
1193
+ //------------------------------------------------------------------------
1194
+ template<class VC>
1195
+ inline double path_base<VC>::last_y() const
1196
+ {
1197
+ return m_vertices.last_y();
1198
+ }
1199
+
1200
+ //------------------------------------------------------------------------
1201
+ template<class VC>
1202
+ inline unsigned path_base<VC>::vertex(unsigned idx, double* x, double* y) const
1203
+ {
1204
+ return m_vertices.vertex(idx, x, y);
1205
+ }
1206
+
1207
+ //------------------------------------------------------------------------
1208
+ template<class VC>
1209
+ inline unsigned path_base<VC>::command(unsigned idx) const
1210
+ {
1211
+ return m_vertices.command(idx);
1212
+ }
1213
+
1214
+ //------------------------------------------------------------------------
1215
+ template<class VC>
1216
+ void path_base<VC>::modify_vertex(unsigned idx, double x, double y)
1217
+ {
1218
+ m_vertices.modify_vertex(idx, x, y);
1219
+ }
1220
+
1221
+ //------------------------------------------------------------------------
1222
+ template<class VC>
1223
+ void path_base<VC>::modify_vertex(unsigned idx, double x, double y, unsigned cmd)
1224
+ {
1225
+ m_vertices.modify_vertex(idx, x, y, cmd);
1226
+ }
1227
+
1228
+ //------------------------------------------------------------------------
1229
+ template<class VC>
1230
+ void path_base<VC>::modify_command(unsigned idx, unsigned cmd)
1231
+ {
1232
+ m_vertices.modify_command(idx, cmd);
1233
+ }
1234
+
1235
+ //------------------------------------------------------------------------
1236
+ template<class VC>
1237
+ inline void path_base<VC>::rewind(unsigned path_id)
1238
+ {
1239
+ m_iterator = path_id;
1240
+ }
1241
+
1242
+ //------------------------------------------------------------------------
1243
+ template<class VC>
1244
+ inline unsigned path_base<VC>::vertex(double* x, double* y)
1245
+ {
1246
+ if(m_iterator >= m_vertices.total_vertices()) return path_cmd_stop;
1247
+ return m_vertices.vertex(m_iterator++, x, y);
1248
+ }
1249
+
1250
+ //------------------------------------------------------------------------
1251
+ template<class VC>
1252
+ unsigned path_base<VC>::perceive_polygon_orientation(unsigned start,
1253
+ unsigned end)
1254
+ {
1255
+ // Calculate signed area (double area to be exact)
1256
+ //---------------------
1257
+ unsigned np = end - start;
1258
+ double area = 0.0;
1259
+ unsigned i;
1260
+ for(i = 0; i < np; i++)
1261
+ {
1262
+ double x1, y1, x2, y2;
1263
+ m_vertices.vertex(start + i, &x1, &y1);
1264
+ m_vertices.vertex(start + (i + 1) % np, &x2, &y2);
1265
+ area += x1 * y2 - y1 * x2;
1266
+ }
1267
+ return (area < 0.0) ? path_flags_cw : path_flags_ccw;
1268
+ }
1269
+
1270
+ //------------------------------------------------------------------------
1271
+ template<class VC>
1272
+ void path_base<VC>::invert_polygon(unsigned start, unsigned end)
1273
+ {
1274
+ unsigned i;
1275
+ unsigned tmp_cmd = m_vertices.command(start);
1276
+
1277
+ --end; // Make "end" inclusive
1278
+
1279
+ // Shift all commands to one position
1280
+ for(i = start; i < end; i++)
1281
+ {
1282
+ m_vertices.modify_command(i, m_vertices.command(i + 1));
1283
+ }
1284
+
1285
+ // Assign starting command to the ending command
1286
+ m_vertices.modify_command(end, tmp_cmd);
1287
+
1288
+ // Reverse the polygon
1289
+ while(end > start)
1290
+ {
1291
+ m_vertices.swap_vertices(start++, end--);
1292
+ }
1293
+ }
1294
+
1295
+ //------------------------------------------------------------------------
1296
+ template<class VC>
1297
+ void path_base<VC>::invert_polygon(unsigned start)
1298
+ {
1299
+ // Skip all non-vertices at the beginning
1300
+ while(start < m_vertices.total_vertices() &&
1301
+ !is_vertex(m_vertices.command(start))) ++start;
1302
+
1303
+ // Skip all insignificant move_to
1304
+ while(start+1 < m_vertices.total_vertices() &&
1305
+ is_move_to(m_vertices.command(start)) &&
1306
+ is_move_to(m_vertices.command(start+1))) ++start;
1307
+
1308
+ // Find the last vertex
1309
+ unsigned end = start + 1;
1310
+ while(end < m_vertices.total_vertices() &&
1311
+ !is_next_poly(m_vertices.command(end))) ++end;
1312
+
1313
+ invert_polygon(start, end);
1314
+ }
1315
+
1316
+ //------------------------------------------------------------------------
1317
+ template<class VC>
1318
+ unsigned path_base<VC>::arrange_polygon_orientation(unsigned start,
1319
+ path_flags_e orientation)
1320
+ {
1321
+ if(orientation == path_flags_none) return start;
1322
+
1323
+ // Skip all non-vertices at the beginning
1324
+ while(start < m_vertices.total_vertices() &&
1325
+ !is_vertex(m_vertices.command(start))) ++start;
1326
+
1327
+ // Skip all insignificant move_to
1328
+ while(start+1 < m_vertices.total_vertices() &&
1329
+ is_move_to(m_vertices.command(start)) &&
1330
+ is_move_to(m_vertices.command(start+1))) ++start;
1331
+
1332
+ // Find the last vertex
1333
+ unsigned end = start + 1;
1334
+ while(end < m_vertices.total_vertices() &&
1335
+ !is_next_poly(m_vertices.command(end))) ++end;
1336
+
1337
+ if(end - start > 2)
1338
+ {
1339
+ if(perceive_polygon_orientation(start, end) != unsigned(orientation))
1340
+ {
1341
+ // Invert polygon, set orientation flag, and skip all end_poly
1342
+ invert_polygon(start, end);
1343
+ unsigned cmd;
1344
+ while(end < m_vertices.total_vertices() &&
1345
+ is_end_poly(cmd = m_vertices.command(end)))
1346
+ {
1347
+ m_vertices.modify_command(end++, set_orientation(cmd, orientation));
1348
+ }
1349
+ }
1350
+ }
1351
+ return end;
1352
+ }
1353
+
1354
+ //------------------------------------------------------------------------
1355
+ template<class VC>
1356
+ unsigned path_base<VC>::arrange_orientations(unsigned start,
1357
+ path_flags_e orientation)
1358
+ {
1359
+ if(orientation != path_flags_none)
1360
+ {
1361
+ while(start < m_vertices.total_vertices())
1362
+ {
1363
+ start = arrange_polygon_orientation(start, orientation);
1364
+ if(is_stop(m_vertices.command(start)))
1365
+ {
1366
+ ++start;
1367
+ break;
1368
+ }
1369
+ }
1370
+ }
1371
+ return start;
1372
+ }
1373
+
1374
+ //------------------------------------------------------------------------
1375
+ template<class VC>
1376
+ void path_base<VC>::arrange_orientations_all_paths(path_flags_e orientation)
1377
+ {
1378
+ if(orientation != path_flags_none)
1379
+ {
1380
+ unsigned start = 0;
1381
+ while(start < m_vertices.total_vertices())
1382
+ {
1383
+ start = arrange_orientations(start, orientation);
1384
+ }
1385
+ }
1386
+ }
1387
+
1388
+ //------------------------------------------------------------------------
1389
+ template<class VC>
1390
+ void path_base<VC>::flip_x(double x1, double x2)
1391
+ {
1392
+ unsigned i;
1393
+ double x, y;
1394
+ for(i = 0; i < m_vertices.total_vertices(); i++)
1395
+ {
1396
+ unsigned cmd = m_vertices.vertex(i, &x, &y);
1397
+ if(is_vertex(cmd))
1398
+ {
1399
+ m_vertices.modify_vertex(i, x2 - x + x1, y);
1400
+ }
1401
+ }
1402
+ }
1403
+
1404
+ //------------------------------------------------------------------------
1405
+ template<class VC>
1406
+ void path_base<VC>::flip_y(double y1, double y2)
1407
+ {
1408
+ unsigned i;
1409
+ double x, y;
1410
+ for(i = 0; i < m_vertices.total_vertices(); i++)
1411
+ {
1412
+ unsigned cmd = m_vertices.vertex(i, &x, &y);
1413
+ if(is_vertex(cmd))
1414
+ {
1415
+ m_vertices.modify_vertex(i, x, y2 - y + y1);
1416
+ }
1417
+ }
1418
+ }
1419
+
1420
+ //------------------------------------------------------------------------
1421
+ template<class VC>
1422
+ void path_base<VC>::translate(double dx, double dy, unsigned path_id)
1423
+ {
1424
+ unsigned num_ver = m_vertices.total_vertices();
1425
+ for(; path_id < num_ver; path_id++)
1426
+ {
1427
+ double x, y;
1428
+ unsigned cmd = m_vertices.vertex(path_id, &x, &y);
1429
+ if(is_stop(cmd)) break;
1430
+ if(is_vertex(cmd))
1431
+ {
1432
+ x += dx;
1433
+ y += dy;
1434
+ m_vertices.modify_vertex(path_id, x, y);
1435
+ }
1436
+ }
1437
+ }
1438
+
1439
+ //------------------------------------------------------------------------
1440
+ template<class VC>
1441
+ void path_base<VC>::translate_all_paths(double dx, double dy)
1442
+ {
1443
+ unsigned idx;
1444
+ unsigned num_ver = m_vertices.total_vertices();
1445
+ for(idx = 0; idx < num_ver; idx++)
1446
+ {
1447
+ double x, y;
1448
+ if(is_vertex(m_vertices.vertex(idx, &x, &y)))
1449
+ {
1450
+ x += dx;
1451
+ y += dy;
1452
+ m_vertices.modify_vertex(idx, x, y);
1453
+ }
1454
+ }
1455
+ }
1456
+
1457
+ //-----------------------------------------------------vertex_stl_storage
1458
+ template<class Container> class vertex_stl_storage
1459
+ {
1460
+ public:
1461
+ typedef typename Container::value_type vertex_type;
1462
+ typedef typename vertex_type::value_type value_type;
1463
+
1464
+ void remove_all() { m_vertices.clear(); }
1465
+ void free_all() { m_vertices.clear(); }
1466
+
1467
+ void add_vertex(double x, double y, unsigned cmd)
1468
+ {
1469
+ m_vertices.push_back(vertex_type(value_type(x),
1470
+ value_type(y),
1471
+ int8u(cmd)));
1472
+ }
1473
+
1474
+ void modify_vertex(unsigned idx, double x, double y)
1475
+ {
1476
+ vertex_type& v = m_vertices[idx];
1477
+ v.x = value_type(x);
1478
+ v.y = value_type(y);
1479
+ }
1480
+
1481
+ void modify_vertex(unsigned idx, double x, double y, unsigned cmd)
1482
+ {
1483
+ vertex_type& v = m_vertices[idx];
1484
+ v.x = value_type(x);
1485
+ v.y = value_type(y);
1486
+ v.cmd = int8u(cmd);
1487
+ }
1488
+
1489
+ void modify_command(unsigned idx, unsigned cmd)
1490
+ {
1491
+ m_vertices[idx].cmd = int8u(cmd);
1492
+ }
1493
+
1494
+ void swap_vertices(unsigned v1, unsigned v2)
1495
+ {
1496
+ vertex_type t = m_vertices[v1];
1497
+ m_vertices[v1] = m_vertices[v2];
1498
+ m_vertices[v2] = t;
1499
+ }
1500
+
1501
+ unsigned last_command() const
1502
+ {
1503
+ return m_vertices.size() ?
1504
+ m_vertices[m_vertices.size() - 1].cmd :
1505
+ path_cmd_stop;
1506
+ }
1507
+
1508
+ unsigned last_vertex(double* x, double* y) const
1509
+ {
1510
+ if(m_vertices.size() == 0)
1511
+ {
1512
+ *x = *y = 0.0;
1513
+ return path_cmd_stop;
1514
+ }
1515
+ return vertex(m_vertices.size() - 1, x, y);
1516
+ }
1517
+
1518
+ unsigned prev_vertex(double* x, double* y) const
1519
+ {
1520
+ if(m_vertices.size() < 2)
1521
+ {
1522
+ *x = *y = 0.0;
1523
+ return path_cmd_stop;
1524
+ }
1525
+ return vertex(m_vertices.size() - 2, x, y);
1526
+ }
1527
+
1528
+ double last_x() const
1529
+ {
1530
+ return m_vertices.size() ? m_vertices[m_vertices.size() - 1].x : 0.0;
1531
+ }
1532
+
1533
+ double last_y() const
1534
+ {
1535
+ return m_vertices.size() ? m_vertices[m_vertices.size() - 1].y : 0.0;
1536
+ }
1537
+
1538
+ unsigned total_vertices() const
1539
+ {
1540
+ return m_vertices.size();
1541
+ }
1542
+
1543
+ unsigned vertex(unsigned idx, double* x, double* y) const
1544
+ {
1545
+ const vertex_type& v = m_vertices[idx];
1546
+ *x = v.x;
1547
+ *y = v.y;
1548
+ return v.cmd;
1549
+ }
1550
+
1551
+ unsigned command(unsigned idx) const
1552
+ {
1553
+ return m_vertices[idx].cmd;
1554
+ }
1555
+
1556
+ private:
1557
+ Container m_vertices;
1558
+ };
1559
+
1560
+ //-----------------------------------------------------------path_storage
1561
+ typedef path_base<vertex_block_storage<double> > path_storage;
1562
+
1563
+ // Example of declaring path_storage with pod_bvector as the container
1564
+ //-----------------------------------------------------------------------
1565
+ //typedef path_base<vertex_stl_storage<pod_bvector<vertex_d> > > path_storage;
1566
+
1567
+ }
1568
+
1569
+
1570
+
1571
+ // Example of declaring path_storage with std::vector as the container
1572
+ //---------------------------------------------------------------------------
1573
+ //#include <vector>
1574
+ //namespace agg
1575
+ //{
1576
+ // typedef path_base<vertex_stl_storage<std::vector<vertex_d> > > stl_path_storage;
1577
+ //}
1578
+
1579
+
1580
+
1581
+
1582
+ #endif
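A short usage sketch of the path_storage typedef above (not part of the original header), following the path_base notes: keep the id returned by start_new_path() and pass it to rewind() before iterating.

    #include "agg_path_storage.h"

    inline unsigned build_and_walk_triangle()
    {
        agg::path_storage ps;
        unsigned id = ps.start_new_path();
        ps.move_to(0.0, 0.0);
        ps.line_to(10.0, 0.0);
        ps.line_to(5.0, 8.0);
        ps.close_polygon();

        ps.rewind(id);
        double x, y;
        unsigned n = 0;
        while(!agg::is_stop(ps.vertex(&x, &y))) ++n;
        return n;   // move_to + 2 line_to + end_poly|close = 4
    }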
data/bundled_deps/agg/agg/agg_pixfmt_base.h ADDED
@@ -0,0 +1,97 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+
16
+ #ifndef AGG_PIXFMT_BASE_INCLUDED
17
+ #define AGG_PIXFMT_BASE_INCLUDED
18
+
19
+ #include "agg_basics.h"
20
+ #include "agg_color_gray.h"
21
+ #include "agg_color_rgba.h"
22
+
23
+ namespace agg
24
+ {
25
+ struct pixfmt_gray_tag
26
+ {
27
+ };
28
+
29
+ struct pixfmt_rgb_tag
30
+ {
31
+ };
32
+
33
+ struct pixfmt_rgba_tag
34
+ {
35
+ };
36
+
37
+ //--------------------------------------------------------------blender_base
38
+ template<class ColorT, class Order = void>
39
+ struct blender_base
40
+ {
41
+ typedef ColorT color_type;
42
+ typedef Order order_type;
43
+ typedef typename color_type::value_type value_type;
44
+
45
+ static rgba get(value_type r, value_type g, value_type b, value_type a, cover_type cover = cover_full)
46
+ {
47
+ if (cover > cover_none)
48
+ {
49
+ rgba c(
50
+ color_type::to_double(r),
51
+ color_type::to_double(g),
52
+ color_type::to_double(b),
53
+ color_type::to_double(a));
54
+
55
+ if (cover < cover_full)
56
+ {
57
+ double x = double(cover) / cover_full;
58
+ c.r *= x;
59
+ c.g *= x;
60
+ c.b *= x;
61
+ c.a *= x;
62
+ }
63
+
64
+ return c;
65
+ }
66
+ else return rgba::no_color();
67
+ }
68
+
69
+ static rgba get(const value_type* p, cover_type cover = cover_full)
70
+ {
71
+ return get(
72
+ p[order_type::R],
73
+ p[order_type::G],
74
+ p[order_type::B],
75
+ p[order_type::A],
76
+ cover);
77
+ }
78
+
79
+ static void set(value_type* p, value_type r, value_type g, value_type b, value_type a)
80
+ {
81
+ p[order_type::R] = r;
82
+ p[order_type::G] = g;
83
+ p[order_type::B] = b;
84
+ p[order_type::A] = a;
85
+ }
86
+
87
+ static void set(value_type* p, const rgba& c)
88
+ {
89
+ p[order_type::R] = color_type::from_double(c.r);
90
+ p[order_type::G] = color_type::from_double(c.g);
91
+ p[order_type::B] = color_type::from_double(c.b);
92
+ p[order_type::A] = color_type::from_double(c.a);
93
+ }
94
+ };
95
+ }
96
+
97
+ #endif
data/bundled_deps/agg/agg/agg_pixfmt_gray.h ADDED
@@ -0,0 +1,738 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ //
16
+ // Adaptation for high precision colors has been sponsored by
17
+ // Liberty Technology Systems, Inc., visit http://lib-sys.com
18
+ //
19
+ // Liberty Technology Systems, Inc. is the provider of
20
+ // PostScript and PDF technology for software developers.
21
+ //
22
+ //----------------------------------------------------------------------------
23
+
24
+ #ifndef AGG_PIXFMT_GRAY_INCLUDED
25
+ #define AGG_PIXFMT_GRAY_INCLUDED
26
+
27
+ #include <string.h>
28
+ #include "agg_pixfmt_base.h"
29
+ #include "agg_rendering_buffer.h"
30
+
31
+ namespace agg
32
+ {
33
+
34
+ //============================================================blender_gray
35
+ template<class ColorT> struct blender_gray
36
+ {
37
+ typedef ColorT color_type;
38
+ typedef typename color_type::value_type value_type;
39
+ typedef typename color_type::calc_type calc_type;
40
+ typedef typename color_type::long_type long_type;
41
+
42
+ // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
43
+ // compositing function. Since the render buffer is opaque we skip the
44
+ // initial premultiply and final demultiply.
45
+
46
+ static AGG_INLINE void blend_pix(value_type* p,
47
+ value_type cv, value_type alpha, cover_type cover)
48
+ {
49
+ blend_pix(p, cv, color_type::mult_cover(alpha, cover));
50
+ }
51
+
52
+ static AGG_INLINE void blend_pix(value_type* p,
53
+ value_type cv, value_type alpha)
54
+ {
55
+ *p = color_type::lerp(*p, cv, alpha);
56
+ }
57
+ };
58
+
59
+
60
+ //======================================================blender_gray_pre
61
+ template<class ColorT> struct blender_gray_pre
62
+ {
63
+ typedef ColorT color_type;
64
+ typedef typename color_type::value_type value_type;
65
+ typedef typename color_type::calc_type calc_type;
66
+ typedef typename color_type::long_type long_type;
67
+
68
+ // Blend pixels using the premultiplied form of Alvy-Ray Smith's
69
+ // compositing function.
70
+
71
+ static AGG_INLINE void blend_pix(value_type* p,
72
+ value_type cv, value_type alpha, cover_type cover)
73
+ {
74
+ blend_pix(p, color_type::mult_cover(cv, cover), color_type::mult_cover(alpha, cover));
75
+ }
76
+
77
+ static AGG_INLINE void blend_pix(value_type* p,
78
+ value_type cv, value_type alpha)
79
+ {
80
+ *p = color_type::prelerp(*p, cv, alpha);
81
+ }
82
+ };
83
+
84
+
85
+
86
+ //=====================================================apply_gamma_dir_gray
87
+ template<class ColorT, class GammaLut> class apply_gamma_dir_gray
88
+ {
89
+ public:
90
+ typedef typename ColorT::value_type value_type;
91
+
92
+ apply_gamma_dir_gray(const GammaLut& gamma) : m_gamma(gamma) {}
93
+
94
+ AGG_INLINE void operator () (value_type* p)
95
+ {
96
+ *p = m_gamma.dir(*p);
97
+ }
98
+
99
+ private:
100
+ const GammaLut& m_gamma;
101
+ };
102
+
103
+
104
+
105
+ //=====================================================apply_gamma_inv_gray
106
+ template<class ColorT, class GammaLut> class apply_gamma_inv_gray
107
+ {
108
+ public:
109
+ typedef typename ColorT::value_type value_type;
110
+
111
+ apply_gamma_inv_gray(const GammaLut& gamma) : m_gamma(gamma) {}
112
+
113
+ AGG_INLINE void operator () (value_type* p)
114
+ {
115
+ *p = m_gamma.inv(*p);
116
+ }
117
+
118
+ private:
119
+ const GammaLut& m_gamma;
120
+ };
121
+
122
+
123
+
124
+ //=================================================pixfmt_alpha_blend_gray
125
+ template<class Blender, class RenBuf, unsigned Step = 1, unsigned Offset = 0>
126
+ class pixfmt_alpha_blend_gray
127
+ {
128
+ public:
129
+ typedef pixfmt_gray_tag pixfmt_category;
130
+ typedef RenBuf rbuf_type;
131
+ typedef typename rbuf_type::row_data row_data;
132
+ typedef Blender blender_type;
133
+ typedef typename blender_type::color_type color_type;
134
+ typedef int order_type; // A fake one
135
+ typedef typename color_type::value_type value_type;
136
+ typedef typename color_type::calc_type calc_type;
137
+ enum
138
+ {
139
+ num_components = 1,
140
+ pix_width = sizeof(value_type) * Step,
141
+ pix_step = Step,
142
+ pix_offset = Offset,
143
+ };
144
+ struct pixel_type
145
+ {
146
+ value_type c[num_components];
147
+
148
+ void set(value_type v)
149
+ {
150
+ c[0] = v;
151
+ }
152
+
153
+ void set(const color_type& color)
154
+ {
155
+ set(color.v);
156
+ }
157
+
158
+ void get(value_type& v) const
159
+ {
160
+ v = c[0];
161
+ }
162
+
163
+ color_type get() const
164
+ {
165
+ return color_type(c[0]);
166
+ }
167
+
168
+ pixel_type* next()
169
+ {
170
+ return (pixel_type*)(c + pix_step);
171
+ }
172
+
173
+ const pixel_type* next() const
174
+ {
175
+ return (const pixel_type*)(c + pix_step);
176
+ }
177
+
178
+ pixel_type* advance(int n)
179
+ {
180
+ return (pixel_type*)(c + n * pix_step);
181
+ }
182
+
183
+ const pixel_type* advance(int n) const
184
+ {
185
+ return (const pixel_type*)(c + n * pix_step);
186
+ }
187
+ };
188
+
189
+ private:
190
+ //--------------------------------------------------------------------
191
+ AGG_INLINE void blend_pix(pixel_type* p,
192
+ value_type v, value_type a,
193
+ unsigned cover)
194
+ {
195
+ blender_type::blend_pix(p->c, v, a, cover);
196
+ }
197
+
198
+ //--------------------------------------------------------------------
199
+ AGG_INLINE void blend_pix(pixel_type* p, value_type v, value_type a)
200
+ {
201
+ blender_type::blend_pix(p->c, v, a);
202
+ }
203
+
204
+ //--------------------------------------------------------------------
205
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover)
206
+ {
207
+ blender_type::blend_pix(p->c, c.v, c.a, cover);
208
+ }
209
+
210
+ //--------------------------------------------------------------------
211
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c)
212
+ {
213
+ blender_type::blend_pix(p->c, c.v, c.a);
214
+ }
215
+
216
+ //--------------------------------------------------------------------
217
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover)
218
+ {
219
+ if (!c.is_transparent())
220
+ {
221
+ if (c.is_opaque() && cover == cover_mask)
222
+ {
223
+ p->set(c);
224
+ }
225
+ else
226
+ {
227
+ blend_pix(p, c, cover);
228
+ }
229
+ }
230
+ }
231
+
232
+ //--------------------------------------------------------------------
233
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c)
234
+ {
235
+ if (!c.is_transparent())
236
+ {
237
+ if (c.is_opaque())
238
+ {
239
+ p->set(c);
240
+ }
241
+ else
242
+ {
243
+ blend_pix(p, c);
244
+ }
245
+ }
246
+ }
247
+
248
+ public:
249
+ //--------------------------------------------------------------------
250
+ explicit pixfmt_alpha_blend_gray(rbuf_type& rb) :
251
+ m_rbuf(&rb)
252
+ {}
253
+ void attach(rbuf_type& rb) { m_rbuf = &rb; }
254
+ //--------------------------------------------------------------------
255
+
256
+ template<class PixFmt>
257
+ bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
258
+ {
259
+ rect_i r(x1, y1, x2, y2);
260
+ if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
261
+ {
262
+ int stride = pixf.stride();
263
+ m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
264
+ (r.x2 - r.x1) + 1,
265
+ (r.y2 - r.y1) + 1,
266
+ stride);
267
+ return true;
268
+ }
269
+ return false;
270
+ }
271
+
272
+ //--------------------------------------------------------------------
273
+ AGG_INLINE unsigned width() const { return m_rbuf->width(); }
274
+ AGG_INLINE unsigned height() const { return m_rbuf->height(); }
275
+ AGG_INLINE int stride() const { return m_rbuf->stride(); }
276
+
277
+ //--------------------------------------------------------------------
278
+ int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
279
+ const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
280
+ row_data row(int y) const { return m_rbuf->row(y); }
281
+
282
+ //--------------------------------------------------------------------
283
+ AGG_INLINE int8u* pix_ptr(int x, int y)
284
+ {
285
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
286
+ }
287
+
288
+ AGG_INLINE const int8u* pix_ptr(int x, int y) const
289
+ {
290
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
291
+ }
292
+
293
+ // Return pointer to pixel value, forcing row to be allocated.
294
+ AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
295
+ {
296
+ return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step + pix_offset));
297
+ }
298
+
299
+ // Return pointer to pixel value, or null if row not allocated.
300
+ AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
301
+ {
302
+ int8u* p = m_rbuf->row_ptr(y);
303
+ return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step + pix_offset)) : 0;
304
+ }
305
+
306
+ // Get pixel pointer from raw buffer pointer.
307
+ AGG_INLINE static pixel_type* pix_value_ptr(void* p)
308
+ {
309
+ return (pixel_type*)((value_type*)p + pix_offset);
310
+ }
311
+
312
+ // Get pixel pointer from raw buffer pointer.
313
+ AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
314
+ {
315
+ return (const pixel_type*)((const value_type*)p + pix_offset);
316
+ }
317
+
318
+ //--------------------------------------------------------------------
319
+ AGG_INLINE static void write_plain_color(void* p, color_type c)
320
+ {
321
+ // Grayscale formats are implicitly premultiplied.
322
+ c.premultiply();
323
+ pix_value_ptr(p)->set(c);
324
+ }
325
+
326
+ //--------------------------------------------------------------------
327
+ AGG_INLINE static color_type read_plain_color(const void* p)
328
+ {
329
+ return pix_value_ptr(p)->get();
330
+ }
331
+
332
+ //--------------------------------------------------------------------
333
+ AGG_INLINE static void make_pix(int8u* p, const color_type& c)
334
+ {
335
+ ((pixel_type*)p)->set(c);
336
+ }
337
+
338
+ //--------------------------------------------------------------------
339
+ AGG_INLINE color_type pixel(int x, int y) const
340
+ {
341
+ if (const pixel_type* p = pix_value_ptr(x, y))
342
+ {
343
+ return p->get();
344
+ }
345
+ return color_type::no_color();
346
+ }
347
+
348
+ //--------------------------------------------------------------------
349
+ AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
350
+ {
351
+ pix_value_ptr(x, y, 1)->set(c);
352
+ }
353
+
354
+ //--------------------------------------------------------------------
355
+ AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
356
+ {
357
+ copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover);
358
+ }
359
+
360
+ //--------------------------------------------------------------------
361
+ AGG_INLINE void copy_hline(int x, int y,
362
+ unsigned len,
363
+ const color_type& c)
364
+ {
365
+ pixel_type* p = pix_value_ptr(x, y, len);
366
+ do
367
+ {
368
+ p->set(c);
369
+ p = p->next();
370
+ }
371
+ while(--len);
372
+ }
373
+
374
+
375
+ //--------------------------------------------------------------------
376
+ AGG_INLINE void copy_vline(int x, int y,
377
+ unsigned len,
378
+ const color_type& c)
379
+ {
380
+ do
381
+ {
382
+ pix_value_ptr(x, y++, 1)->set(c);
383
+ }
384
+ while (--len);
385
+ }
386
+
387
+
388
+ //--------------------------------------------------------------------
389
+ void blend_hline(int x, int y,
390
+ unsigned len,
391
+ const color_type& c,
392
+ int8u cover)
393
+ {
394
+ if (!c.is_transparent())
395
+ {
396
+ pixel_type* p = pix_value_ptr(x, y, len);
397
+
398
+ if (c.is_opaque() && cover == cover_mask)
399
+ {
400
+ do
401
+ {
402
+ p->set(c);
403
+ p = p->next();
404
+ }
405
+ while (--len);
406
+ }
407
+ else
408
+ {
409
+ do
410
+ {
411
+ blend_pix(p, c, cover);
412
+ p = p->next();
413
+ }
414
+ while (--len);
415
+ }
416
+ }
417
+ }
418
+
419
+
420
+ //--------------------------------------------------------------------
421
+ void blend_vline(int x, int y,
422
+ unsigned len,
423
+ const color_type& c,
424
+ int8u cover)
425
+ {
426
+ if (!c.is_transparent())
427
+ {
428
+ if (c.is_opaque() && cover == cover_mask)
429
+ {
430
+ do
431
+ {
432
+ pix_value_ptr(x, y++, 1)->set(c);
433
+ }
434
+ while (--len);
435
+ }
436
+ else
437
+ {
438
+ do
439
+ {
440
+ blend_pix(pix_value_ptr(x, y++, 1), c, cover);
441
+ }
442
+ while (--len);
443
+ }
444
+ }
445
+ }
446
+
447
+
448
+ //--------------------------------------------------------------------
449
+ void blend_solid_hspan(int x, int y,
450
+ unsigned len,
451
+ const color_type& c,
452
+ const int8u* covers)
453
+ {
454
+ if (!c.is_transparent())
455
+ {
456
+ pixel_type* p = pix_value_ptr(x, y, len);
457
+
458
+ do
459
+ {
460
+ if (c.is_opaque() && *covers == cover_mask)
461
+ {
462
+ p->set(c);
463
+ }
464
+ else
465
+ {
466
+ blend_pix(p, c, *covers);
467
+ }
468
+ p = p->next();
469
+ ++covers;
470
+ }
471
+ while (--len);
472
+ }
473
+ }
474
+
475
+
476
+ //--------------------------------------------------------------------
477
+ void blend_solid_vspan(int x, int y,
478
+ unsigned len,
479
+ const color_type& c,
480
+ const int8u* covers)
481
+ {
482
+ if (!c.is_transparent())
483
+ {
484
+ do
485
+ {
486
+ pixel_type* p = pix_value_ptr(x, y++, 1);
487
+
488
+ if (c.is_opaque() && *covers == cover_mask)
489
+ {
490
+ p->set(c);
491
+ }
492
+ else
493
+ {
494
+ blend_pix(p, c, *covers);
495
+ }
496
+ ++covers;
497
+ }
498
+ while (--len);
499
+ }
500
+ }
501
+
502
+
503
+ //--------------------------------------------------------------------
504
+ void copy_color_hspan(int x, int y,
505
+ unsigned len,
506
+ const color_type* colors)
507
+ {
508
+ pixel_type* p = pix_value_ptr(x, y, len);
509
+
510
+ do
511
+ {
512
+ p->set(*colors++);
513
+ p = p->next();
514
+ }
515
+ while (--len);
516
+ }
517
+
518
+
519
+ //--------------------------------------------------------------------
520
+ void copy_color_vspan(int x, int y,
521
+ unsigned len,
522
+ const color_type* colors)
523
+ {
524
+ do
525
+ {
526
+ pix_value_ptr(x, y++, 1)->set(*colors++);
527
+ }
528
+ while (--len);
529
+ }
530
+
531
+
532
+ //--------------------------------------------------------------------
533
+ void blend_color_hspan(int x, int y,
534
+ unsigned len,
535
+ const color_type* colors,
536
+ const int8u* covers,
537
+ int8u cover)
538
+ {
539
+ pixel_type* p = pix_value_ptr(x, y, len);
540
+
541
+ if (covers)
542
+ {
543
+ do
544
+ {
545
+ copy_or_blend_pix(p, *colors++, *covers++);
546
+ p = p->next();
547
+ }
548
+ while (--len);
549
+ }
550
+ else
551
+ {
552
+ if (cover == cover_mask)
553
+ {
554
+ do
555
+ {
556
+ copy_or_blend_pix(p, *colors++);
557
+ p = p->next();
558
+ }
559
+ while (--len);
560
+ }
561
+ else
562
+ {
563
+ do
564
+ {
565
+ copy_or_blend_pix(p, *colors++, cover);
566
+ p = p->next();
567
+ }
568
+ while (--len);
569
+ }
570
+ }
571
+ }
572
+
573
+
574
+ //--------------------------------------------------------------------
575
+ void blend_color_vspan(int x, int y,
576
+ unsigned len,
577
+ const color_type* colors,
578
+ const int8u* covers,
579
+ int8u cover)
580
+ {
581
+ if (covers)
582
+ {
583
+ do
584
+ {
585
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++);
586
+ }
587
+ while (--len);
588
+ }
589
+ else
590
+ {
591
+ if (cover == cover_mask)
592
+ {
593
+ do
594
+ {
595
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++);
596
+ }
597
+ while (--len);
598
+ }
599
+ else
600
+ {
601
+ do
602
+ {
603
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover);
604
+ }
605
+ while (--len);
606
+ }
607
+ }
608
+ }
609
+
610
+ //--------------------------------------------------------------------
611
+ template<class Function> void for_each_pixel(Function f)
612
+ {
613
+ unsigned y;
614
+ for (y = 0; y < height(); ++y)
615
+ {
616
+ row_data r = m_rbuf->row(y);
617
+ if (r.ptr)
618
+ {
619
+ unsigned len = r.x2 - r.x1 + 1;
620
+ pixel_type* p = pix_value_ptr(r.x1, y, len);
621
+ do
622
+ {
623
+ f(p->c);
624
+ p = p->next();
625
+ }
626
+ while (--len);
627
+ }
628
+ }
629
+ }
630
+
631
+ //--------------------------------------------------------------------
632
+ template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
633
+ {
634
+ for_each_pixel(apply_gamma_dir_gray<color_type, GammaLut>(g));
635
+ }
636
+
637
+ //--------------------------------------------------------------------
638
+ template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
639
+ {
640
+ for_each_pixel(apply_gamma_inv_gray<color_type, GammaLut>(g));
641
+ }
642
+
643
+ //--------------------------------------------------------------------
644
+ template<class RenBuf2>
645
+ void copy_from(const RenBuf2& from,
646
+ int xdst, int ydst,
647
+ int xsrc, int ysrc,
648
+ unsigned len)
649
+ {
650
+ if (const int8u* p = from.row_ptr(ysrc))
651
+ {
652
+ memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
653
+ p + xsrc * pix_width,
654
+ len * pix_width);
655
+ }
656
+ }
657
+
658
+ //--------------------------------------------------------------------
659
+ // Blend from single color, using grayscale surface as alpha channel.
660
+ template<class SrcPixelFormatRenderer>
661
+ void blend_from_color(const SrcPixelFormatRenderer& from,
662
+ const color_type& color,
663
+ int xdst, int ydst,
664
+ int xsrc, int ysrc,
665
+ unsigned len,
666
+ int8u cover)
667
+ {
668
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
669
+ typedef typename SrcPixelFormatRenderer::color_type src_color_type;
670
+
671
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
672
+ {
673
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
674
+
675
+ do
676
+ {
677
+ copy_or_blend_pix(pdst, color, src_color_type::scale_cover(cover, psrc->c[0]));
678
+ psrc = psrc->next();
679
+ pdst = pdst->next();
680
+ }
681
+ while (--len);
682
+ }
683
+ }
684
+
685
+ //--------------------------------------------------------------------
686
+ // Blend from color table, using grayscale surface as indexes into table.
687
+ // Obviously, this only works for integer value types.
688
+ template<class SrcPixelFormatRenderer>
689
+ void blend_from_lut(const SrcPixelFormatRenderer& from,
690
+ const color_type* color_lut,
691
+ int xdst, int ydst,
692
+ int xsrc, int ysrc,
693
+ unsigned len,
694
+ int8u cover)
695
+ {
696
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
697
+
698
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
699
+ {
700
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
701
+
702
+ do
703
+ {
704
+ copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover);
705
+ psrc = psrc->next();
706
+ pdst = pdst->next();
707
+ }
708
+ while (--len);
709
+ }
710
+ }
711
+
712
+ private:
713
+ rbuf_type* m_rbuf;
714
+ };
715
+
716
+ typedef blender_gray<gray8> blender_gray8;
717
+ typedef blender_gray<sgray8> blender_sgray8;
718
+ typedef blender_gray<gray16> blender_gray16;
719
+ typedef blender_gray<gray32> blender_gray32;
720
+
721
+ typedef blender_gray_pre<gray8> blender_gray8_pre;
722
+ typedef blender_gray_pre<sgray8> blender_sgray8_pre;
723
+ typedef blender_gray_pre<gray16> blender_gray16_pre;
724
+ typedef blender_gray_pre<gray32> blender_gray32_pre;
725
+
726
+ typedef pixfmt_alpha_blend_gray<blender_gray8, rendering_buffer> pixfmt_gray8;
727
+ typedef pixfmt_alpha_blend_gray<blender_sgray8, rendering_buffer> pixfmt_sgray8;
728
+ typedef pixfmt_alpha_blend_gray<blender_gray16, rendering_buffer> pixfmt_gray16;
729
+ typedef pixfmt_alpha_blend_gray<blender_gray32, rendering_buffer> pixfmt_gray32;
730
+
731
+ typedef pixfmt_alpha_blend_gray<blender_gray8_pre, rendering_buffer> pixfmt_gray8_pre;
732
+ typedef pixfmt_alpha_blend_gray<blender_sgray8_pre, rendering_buffer> pixfmt_sgray8_pre;
733
+ typedef pixfmt_alpha_blend_gray<blender_gray16_pre, rendering_buffer> pixfmt_gray16_pre;
734
+ typedef pixfmt_alpha_blend_gray<blender_gray32_pre, rendering_buffer> pixfmt_gray32_pre;
735
+ }
736
+
737
+ #endif
738
+
data/bundled_deps/agg/agg/agg_pixfmt_rgb.h ADDED
@@ -0,0 +1,995 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ //
16
+ // Adaptation for high precision colors has been sponsored by
17
+ // Liberty Technology Systems, Inc., visit http://lib-sys.com
18
+ //
19
+ // Liberty Technology Systems, Inc. is the provider of
20
+ // PostScript and PDF technology for software developers.
21
+ //
22
+ //----------------------------------------------------------------------------
23
+
24
+ #ifndef AGG_PIXFMT_RGB_INCLUDED
25
+ #define AGG_PIXFMT_RGB_INCLUDED
26
+
27
+ #include <string.h>
28
+ #include "agg_pixfmt_base.h"
29
+ #include "agg_rendering_buffer.h"
30
+
31
+ namespace agg
32
+ {
33
+
34
+ //=====================================================apply_gamma_dir_rgb
35
+ template<class ColorT, class Order, class GammaLut> class apply_gamma_dir_rgb
36
+ {
37
+ public:
38
+ typedef typename ColorT::value_type value_type;
39
+
40
+ apply_gamma_dir_rgb(const GammaLut& gamma) : m_gamma(gamma) {}
41
+
42
+ AGG_INLINE void operator () (value_type* p)
43
+ {
44
+ p[Order::R] = m_gamma.dir(p[Order::R]);
45
+ p[Order::G] = m_gamma.dir(p[Order::G]);
46
+ p[Order::B] = m_gamma.dir(p[Order::B]);
47
+ }
48
+
49
+ private:
50
+ const GammaLut& m_gamma;
51
+ };
52
+
53
+
54
+
55
+ //=====================================================apply_gamma_inv_rgb
56
+ template<class ColorT, class Order, class GammaLut> class apply_gamma_inv_rgb
57
+ {
58
+ public:
59
+ typedef typename ColorT::value_type value_type;
60
+
61
+ apply_gamma_inv_rgb(const GammaLut& gamma) : m_gamma(gamma) {}
62
+
63
+ AGG_INLINE void operator () (value_type* p)
64
+ {
65
+ p[Order::R] = m_gamma.inv(p[Order::R]);
66
+ p[Order::G] = m_gamma.inv(p[Order::G]);
67
+ p[Order::B] = m_gamma.inv(p[Order::B]);
68
+ }
69
+
70
+ private:
71
+ const GammaLut& m_gamma;
72
+ };
73
+
74
+
75
+ //=========================================================blender_rgb
76
+ template<class ColorT, class Order>
77
+ struct blender_rgb
78
+ {
79
+ typedef ColorT color_type;
80
+ typedef Order order_type;
81
+ typedef typename color_type::value_type value_type;
82
+ typedef typename color_type::calc_type calc_type;
83
+ typedef typename color_type::long_type long_type;
84
+
85
+ // Blend pixels using the non-premultiplied form of Alvy-Ray Smith's
86
+ // compositing function. Since the render buffer is opaque we skip the
87
+ // initial premultiply and final demultiply.
88
+
89
+ //--------------------------------------------------------------------
90
+ static AGG_INLINE void blend_pix(value_type* p,
91
+ value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
92
+ {
93
+ blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
94
+ }
95
+
96
+ //--------------------------------------------------------------------
97
+ static AGG_INLINE void blend_pix(value_type* p,
98
+ value_type cr, value_type cg, value_type cb, value_type alpha)
99
+ {
100
+ p[Order::R] = color_type::lerp(p[Order::R], cr, alpha);
101
+ p[Order::G] = color_type::lerp(p[Order::G], cg, alpha);
102
+ p[Order::B] = color_type::lerp(p[Order::B], cb, alpha);
103
+ }
104
+ };
105
+
106
+ //======================================================blender_rgb_pre
107
+ template<class ColorT, class Order>
108
+ struct blender_rgb_pre
109
+ {
110
+ typedef ColorT color_type;
111
+ typedef Order order_type;
112
+ typedef typename color_type::value_type value_type;
113
+ typedef typename color_type::calc_type calc_type;
114
+ typedef typename color_type::long_type long_type;
115
+
116
+ // Blend pixels using the premultiplied form of Alvy-Ray Smith's
117
+ // compositing function.
118
+
119
+ //--------------------------------------------------------------------
120
+ static AGG_INLINE void blend_pix(value_type* p,
121
+ value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
122
+ {
123
+ blend_pix(p,
124
+ color_type::mult_cover(cr, cover),
125
+ color_type::mult_cover(cg, cover),
126
+ color_type::mult_cover(cb, cover),
127
+ color_type::mult_cover(alpha, cover));
128
+ }
129
+
130
+ //--------------------------------------------------------------------
131
+ static AGG_INLINE void blend_pix(value_type* p,
132
+ value_type cr, value_type cg, value_type cb, value_type alpha)
133
+ {
134
+ p[Order::R] = color_type::prelerp(p[Order::R], cr, alpha);
135
+ p[Order::G] = color_type::prelerp(p[Order::G], cg, alpha);
136
+ p[Order::B] = color_type::prelerp(p[Order::B], cb, alpha);
137
+ }
138
+ };
139
+
140
+ //===================================================blender_rgb_gamma
141
+ template<class ColorT, class Order, class Gamma>
142
+ class blender_rgb_gamma : public blender_base<ColorT, Order>
143
+ {
144
+ public:
145
+ typedef ColorT color_type;
146
+ typedef Order order_type;
147
+ typedef Gamma gamma_type;
148
+ typedef typename color_type::value_type value_type;
149
+ typedef typename color_type::calc_type calc_type;
150
+ typedef typename color_type::long_type long_type;
151
+
152
+ //--------------------------------------------------------------------
153
+ blender_rgb_gamma() : m_gamma(0) {}
154
+ void gamma(const gamma_type& g) { m_gamma = &g; }
155
+
156
+ //--------------------------------------------------------------------
157
+ AGG_INLINE void blend_pix(value_type* p,
158
+ value_type cr, value_type cg, value_type cb, value_type alpha, cover_type cover)
159
+ {
160
+ blend_pix(p, cr, cg, cb, color_type::mult_cover(alpha, cover));
161
+ }
162
+
163
+ //--------------------------------------------------------------------
164
+ AGG_INLINE void blend_pix(value_type* p,
165
+ value_type cr, value_type cg, value_type cb, value_type alpha)
166
+ {
167
+ calc_type r = m_gamma->dir(p[Order::R]);
168
+ calc_type g = m_gamma->dir(p[Order::G]);
169
+ calc_type b = m_gamma->dir(p[Order::B]);
170
+ p[Order::R] = m_gamma->inv(color_type::downscale((m_gamma->dir(cr) - r) * alpha) + r);
171
+ p[Order::G] = m_gamma->inv(color_type::downscale((m_gamma->dir(cg) - g) * alpha) + g);
172
+ p[Order::B] = m_gamma->inv(color_type::downscale((m_gamma->dir(cb) - b) * alpha) + b);
173
+ }
174
+
175
+ private:
176
+ const gamma_type* m_gamma;
177
+ };
178
+
179
+
180
+ //==================================================pixfmt_alpha_blend_rgb
181
+ template<class Blender, class RenBuf, unsigned Step, unsigned Offset = 0>
182
+ class pixfmt_alpha_blend_rgb
183
+ {
184
+ public:
185
+ typedef pixfmt_rgb_tag pixfmt_category;
186
+ typedef RenBuf rbuf_type;
187
+ typedef Blender blender_type;
188
+ typedef typename rbuf_type::row_data row_data;
189
+ typedef typename blender_type::color_type color_type;
190
+ typedef typename blender_type::order_type order_type;
191
+ typedef typename color_type::value_type value_type;
192
+ typedef typename color_type::calc_type calc_type;
193
+ enum
194
+ {
195
+ num_components = 3,
196
+ pix_step = Step,
197
+ pix_offset = Offset,
198
+ pix_width = sizeof(value_type) * pix_step
199
+ };
200
+ struct pixel_type
201
+ {
202
+ value_type c[num_components];
203
+
204
+ void set(value_type r, value_type g, value_type b)
205
+ {
206
+ c[order_type::R] = r;
207
+ c[order_type::G] = g;
208
+ c[order_type::B] = b;
209
+ }
210
+
211
+ void set(const color_type& color)
212
+ {
213
+ set(color.r, color.g, color.b);
214
+ }
215
+
216
+ void get(value_type& r, value_type& g, value_type& b) const
217
+ {
218
+ r = c[order_type::R];
219
+ g = c[order_type::G];
220
+ b = c[order_type::B];
221
+ }
222
+
223
+ color_type get() const
224
+ {
225
+ return color_type(
226
+ c[order_type::R],
227
+ c[order_type::G],
228
+ c[order_type::B]);
229
+ }
230
+
231
+ pixel_type* next()
232
+ {
233
+ return (pixel_type*)(c + pix_step);
234
+ }
235
+
236
+ const pixel_type* next() const
237
+ {
238
+ return (const pixel_type*)(c + pix_step);
239
+ }
240
+
241
+ pixel_type* advance(int n)
242
+ {
243
+ return (pixel_type*)(c + n * pix_step);
244
+ }
245
+
246
+ const pixel_type* advance(int n) const
247
+ {
248
+ return (const pixel_type*)(c + n * pix_step);
249
+ }
250
+ };
251
+
252
+ private:
253
+ //--------------------------------------------------------------------
254
+ AGG_INLINE void blend_pix(pixel_type* p,
255
+ value_type r, value_type g, value_type b, value_type a,
256
+ unsigned cover)
257
+ {
258
+ m_blender.blend_pix(p->c, r, g, b, a, cover);
259
+ }
260
+
261
+ //--------------------------------------------------------------------
262
+ AGG_INLINE void blend_pix(pixel_type* p,
263
+ value_type r, value_type g, value_type b, value_type a)
264
+ {
265
+ m_blender.blend_pix(p->c, r, g, b, a);
266
+ }
267
+
268
+ //--------------------------------------------------------------------
269
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c, unsigned cover)
270
+ {
271
+ m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a, cover);
272
+ }
273
+
274
+ //--------------------------------------------------------------------
275
+ AGG_INLINE void blend_pix(pixel_type* p, const color_type& c)
276
+ {
277
+ m_blender.blend_pix(p->c, c.r, c.g, c.b, c.a);
278
+ }
279
+
280
+ //--------------------------------------------------------------------
281
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c, unsigned cover)
282
+ {
283
+ if (!c.is_transparent())
284
+ {
285
+ if (c.is_opaque() && cover == cover_mask)
286
+ {
287
+ p->set(c);
288
+ }
289
+ else
290
+ {
291
+ blend_pix(p, c, cover);
292
+ }
293
+ }
294
+ }
295
+
296
+ //--------------------------------------------------------------------
297
+ AGG_INLINE void copy_or_blend_pix(pixel_type* p, const color_type& c)
298
+ {
299
+ if (!c.is_transparent())
300
+ {
301
+ if (c.is_opaque())
302
+ {
303
+ p->set(c);
304
+ }
305
+ else
306
+ {
307
+ blend_pix(p, c);
308
+ }
309
+ }
310
+ }
311
+
312
+ public:
313
+ //--------------------------------------------------------------------
314
+ explicit pixfmt_alpha_blend_rgb(rbuf_type& rb) :
315
+ m_rbuf(&rb)
316
+ {}
317
+ void attach(rbuf_type& rb) { m_rbuf = &rb; }
318
+
319
+ //--------------------------------------------------------------------
320
+ template<class PixFmt>
321
+ bool attach(PixFmt& pixf, int x1, int y1, int x2, int y2)
322
+ {
323
+ rect_i r(x1, y1, x2, y2);
324
+ if (r.clip(rect_i(0, 0, pixf.width()-1, pixf.height()-1)))
325
+ {
326
+ int stride = pixf.stride();
327
+ m_rbuf->attach(pixf.pix_ptr(r.x1, stride < 0 ? r.y2 : r.y1),
328
+ (r.x2 - r.x1) + 1,
329
+ (r.y2 - r.y1) + 1,
330
+ stride);
331
+ return true;
332
+ }
333
+ return false;
334
+ }
335
+
336
+ //--------------------------------------------------------------------
337
+ Blender& blender() { return m_blender; }
338
+
339
+ //--------------------------------------------------------------------
340
+ AGG_INLINE unsigned width() const { return m_rbuf->width(); }
341
+ AGG_INLINE unsigned height() const { return m_rbuf->height(); }
342
+ AGG_INLINE int stride() const { return m_rbuf->stride(); }
343
+
344
+ //--------------------------------------------------------------------
345
+ AGG_INLINE int8u* row_ptr(int y) { return m_rbuf->row_ptr(y); }
346
+ AGG_INLINE const int8u* row_ptr(int y) const { return m_rbuf->row_ptr(y); }
347
+ AGG_INLINE row_data row(int y) const { return m_rbuf->row(y); }
348
+
349
+ //--------------------------------------------------------------------
350
+ AGG_INLINE int8u* pix_ptr(int x, int y)
351
+ {
352
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
353
+ }
354
+
355
+ AGG_INLINE const int8u* pix_ptr(int x, int y) const
356
+ {
357
+ return m_rbuf->row_ptr(y) + sizeof(value_type) * (x * pix_step + pix_offset);
358
+ }
359
+
360
+ // Return pointer to pixel value, forcing row to be allocated.
361
+ AGG_INLINE pixel_type* pix_value_ptr(int x, int y, unsigned len)
362
+ {
363
+ return (pixel_type*)(m_rbuf->row_ptr(x, y, len) + sizeof(value_type) * (x * pix_step + pix_offset));
364
+ }
365
+
366
+ // Return pointer to pixel value, or null if row not allocated.
367
+ AGG_INLINE const pixel_type* pix_value_ptr(int x, int y) const
368
+ {
369
+ int8u* p = m_rbuf->row_ptr(y);
370
+ return p ? (pixel_type*)(p + sizeof(value_type) * (x * pix_step + pix_offset)) : 0;
371
+ }
372
+
373
+ // Get pixel pointer from raw buffer pointer.
374
+ AGG_INLINE static pixel_type* pix_value_ptr(void* p)
375
+ {
376
+ return (pixel_type*)((value_type*)p + pix_offset);
377
+ }
378
+
379
+ // Get pixel pointer from raw buffer pointer.
380
+ AGG_INLINE static const pixel_type* pix_value_ptr(const void* p)
381
+ {
382
+ return (const pixel_type*)((const value_type*)p + pix_offset);
383
+ }
384
+
385
+ //--------------------------------------------------------------------
386
+ AGG_INLINE static void write_plain_color(void* p, color_type c)
387
+ {
388
+ // RGB formats are implicitly premultiplied.
389
+ c.premultiply();
390
+ pix_value_ptr(p)->set(c);
391
+ }
392
+
393
+ //--------------------------------------------------------------------
394
+ AGG_INLINE static color_type read_plain_color(const void* p)
395
+ {
396
+ return pix_value_ptr(p)->get();
397
+ }
398
+
399
+ //--------------------------------------------------------------------
400
+ AGG_INLINE static void make_pix(int8u* p, const color_type& c)
401
+ {
402
+ ((pixel_type*)p)->set(c);
403
+ }
404
+
405
+ //--------------------------------------------------------------------
406
+ AGG_INLINE color_type pixel(int x, int y) const
407
+ {
408
+ if (const pixel_type* p = pix_value_ptr(x, y))
409
+ {
410
+ return p->get();
411
+ }
412
+ return color_type::no_color();
413
+ }
414
+
415
+ //--------------------------------------------------------------------
416
+ AGG_INLINE void copy_pixel(int x, int y, const color_type& c)
417
+ {
418
+ pix_value_ptr(x, y, 1)->set(c);
419
+ }
420
+
421
+ //--------------------------------------------------------------------
422
+ AGG_INLINE void blend_pixel(int x, int y, const color_type& c, int8u cover)
423
+ {
424
+ copy_or_blend_pix(pix_value_ptr(x, y, 1), c, cover);
425
+ }
426
+
427
+ //--------------------------------------------------------------------
428
+ AGG_INLINE void copy_hline(int x, int y,
429
+ unsigned len,
430
+ const color_type& c)
431
+ {
432
+ pixel_type* p = pix_value_ptr(x, y, len);
433
+ do
434
+ {
435
+ p->set(c);
436
+ p = p->next();
437
+ }
438
+ while(--len);
439
+ }
440
+
441
+
442
+ //--------------------------------------------------------------------
443
+ AGG_INLINE void copy_vline(int x, int y,
444
+ unsigned len,
445
+ const color_type& c)
446
+ {
447
+ do
448
+ {
449
+ pix_value_ptr(x, y++, 1)->set(c);
450
+ }
451
+ while (--len);
452
+ }
453
+
454
+ //--------------------------------------------------------------------
455
+ void blend_hline(int x, int y,
456
+ unsigned len,
457
+ const color_type& c,
458
+ int8u cover)
459
+ {
460
+ if (!c.is_transparent())
461
+ {
462
+ pixel_type* p = pix_value_ptr(x, y, len);
463
+
464
+ if (c.is_opaque() && cover == cover_mask)
465
+ {
466
+ do
467
+ {
468
+ p->set(c);
469
+ p = p->next();
470
+ }
471
+ while (--len);
472
+ }
473
+ else
474
+ {
475
+ do
476
+ {
477
+ blend_pix(p, c, cover);
478
+ p = p->next();
479
+ }
480
+ while (--len);
481
+ }
482
+ }
483
+ }
484
+
485
+
486
+ //--------------------------------------------------------------------
487
+ void blend_vline(int x, int y,
488
+ unsigned len,
489
+ const color_type& c,
490
+ int8u cover)
491
+ {
492
+ if (!c.is_transparent())
493
+ {
494
+ if (c.is_opaque() && cover == cover_mask)
495
+ {
496
+ do
497
+ {
498
+ pix_value_ptr(x, y++, 1)->set(c);
499
+ }
500
+ while (--len);
501
+ }
502
+ else
503
+ {
504
+ do
505
+ {
506
+ blend_pix(pix_value_ptr(x, y++, 1), c, cover);
507
+ }
508
+ while (--len);
509
+ }
510
+ }
511
+ }
512
+
513
+ //--------------------------------------------------------------------
514
+ void blend_solid_hspan(int x, int y,
515
+ unsigned len,
516
+ const color_type& c,
517
+ const int8u* covers)
518
+ {
519
+ if (!c.is_transparent())
520
+ {
521
+ pixel_type* p = pix_value_ptr(x, y, len);
522
+
523
+ do
524
+ {
525
+ if (c.is_opaque() && *covers == cover_mask)
526
+ {
527
+ p->set(c);
528
+ }
529
+ else
530
+ {
531
+ blend_pix(p, c, *covers);
532
+ }
533
+ p = p->next();
534
+ ++covers;
535
+ }
536
+ while (--len);
537
+ }
538
+ }
539
+
540
+
541
+ //--------------------------------------------------------------------
542
+ void blend_solid_vspan(int x, int y,
543
+ unsigned len,
544
+ const color_type& c,
545
+ const int8u* covers)
546
+ {
547
+ if (!c.is_transparent())
548
+ {
549
+ do
550
+ {
551
+ pixel_type* p = pix_value_ptr(x, y++, 1);
552
+
553
+ if (c.is_opaque() && *covers == cover_mask)
554
+ {
555
+ p->set(c);
556
+ }
557
+ else
558
+ {
559
+ blend_pix(p, c, *covers);
560
+ }
561
+ ++covers;
562
+ }
563
+ while (--len);
564
+ }
565
+ }
566
+
567
+ //--------------------------------------------------------------------
568
+ void copy_color_hspan(int x, int y,
569
+ unsigned len,
570
+ const color_type* colors)
571
+ {
572
+ pixel_type* p = pix_value_ptr(x, y, len);
573
+
574
+ do
575
+ {
576
+ p->set(*colors++);
577
+ p = p->next();
578
+ }
579
+ while (--len);
580
+ }
581
+
582
+
583
+ //--------------------------------------------------------------------
584
+ void copy_color_vspan(int x, int y,
585
+ unsigned len,
586
+ const color_type* colors)
587
+ {
588
+ do
589
+ {
590
+ pix_value_ptr(x, y++, 1)->set(*colors++);
591
+ }
592
+ while (--len);
593
+ }
594
+
595
+ //--------------------------------------------------------------------
596
+ void blend_color_hspan(int x, int y,
597
+ unsigned len,
598
+ const color_type* colors,
599
+ const int8u* covers,
600
+ int8u cover)
601
+ {
602
+ pixel_type* p = pix_value_ptr(x, y, len);
603
+
604
+ if (covers)
605
+ {
606
+ do
607
+ {
608
+ copy_or_blend_pix(p, *colors++, *covers++);
609
+ p = p->next();
610
+ }
611
+ while (--len);
612
+ }
613
+ else
614
+ {
615
+ if (cover == cover_mask)
616
+ {
617
+ do
618
+ {
619
+ copy_or_blend_pix(p, *colors++);
620
+ p = p->next();
621
+ }
622
+ while (--len);
623
+ }
624
+ else
625
+ {
626
+ do
627
+ {
628
+ copy_or_blend_pix(p, *colors++, cover);
629
+ p = p->next();
630
+ }
631
+ while (--len);
632
+ }
633
+ }
634
+ }
635
+
636
+ //--------------------------------------------------------------------
637
+ void blend_color_vspan(int x, int y,
638
+ unsigned len,
639
+ const color_type* colors,
640
+ const int8u* covers,
641
+ int8u cover)
642
+ {
643
+ if (covers)
644
+ {
645
+ do
646
+ {
647
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, *covers++);
648
+ }
649
+ while (--len);
650
+ }
651
+ else
652
+ {
653
+ if (cover == cover_mask)
654
+ {
655
+ do
656
+ {
657
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++);
658
+ }
659
+ while (--len);
660
+ }
661
+ else
662
+ {
663
+ do
664
+ {
665
+ copy_or_blend_pix(pix_value_ptr(x, y++, 1), *colors++, cover);
666
+ }
667
+ while (--len);
668
+ }
669
+ }
670
+ }
671
+
672
+ //--------------------------------------------------------------------
673
+ template<class Function> void for_each_pixel(Function f)
674
+ {
675
+ for (unsigned y = 0; y < height(); ++y)
676
+ {
677
+ row_data r = m_rbuf->row(y);
678
+ if (r.ptr)
679
+ {
680
+ unsigned len = r.x2 - r.x1 + 1;
681
+ pixel_type* p = pix_value_ptr(r.x1, y, len);
682
+ do
683
+ {
684
+ f(p->c);
685
+ p = p->next();
686
+ }
687
+ while (--len);
688
+ }
689
+ }
690
+ }
691
+
692
+ //--------------------------------------------------------------------
693
+ template<class GammaLut> void apply_gamma_dir(const GammaLut& g)
694
+ {
695
+ for_each_pixel(apply_gamma_dir_rgb<color_type, order_type, GammaLut>(g));
696
+ }
697
+
698
+ //--------------------------------------------------------------------
699
+ template<class GammaLut> void apply_gamma_inv(const GammaLut& g)
700
+ {
701
+ for_each_pixel(apply_gamma_inv_rgb<color_type, order_type, GammaLut>(g));
702
+ }
703
+
704
+ //--------------------------------------------------------------------
705
+ template<class RenBuf2>
706
+ void copy_from(const RenBuf2& from,
707
+ int xdst, int ydst,
708
+ int xsrc, int ysrc,
709
+ unsigned len)
710
+ {
711
+ if (const int8u* p = from.row_ptr(ysrc))
712
+ {
713
+ memmove(m_rbuf->row_ptr(xdst, ydst, len) + xdst * pix_width,
714
+ p + xsrc * pix_width,
715
+ len * pix_width);
716
+ }
717
+ }
718
+
719
+ //--------------------------------------------------------------------
720
+ // Blend from an RGBA surface.
721
+ template<class SrcPixelFormatRenderer>
722
+ void blend_from(const SrcPixelFormatRenderer& from,
723
+ int xdst, int ydst,
724
+ int xsrc, int ysrc,
725
+ unsigned len,
726
+ int8u cover)
727
+ {
728
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
729
+ typedef typename SrcPixelFormatRenderer::order_type src_order;
730
+
731
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
732
+ {
733
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
734
+
735
+ if (cover == cover_mask)
736
+ {
737
+ do
738
+ {
739
+ value_type alpha = psrc->c[src_order::A];
740
+ if (alpha <= color_type::empty_value())
741
+ {
742
+ if (alpha >= color_type::full_value())
743
+ {
744
+ pdst->c[order_type::R] = psrc->c[src_order::R];
745
+ pdst->c[order_type::G] = psrc->c[src_order::G];
746
+ pdst->c[order_type::B] = psrc->c[src_order::B];
747
+ }
748
+ else
749
+ {
750
+ blend_pix(pdst,
751
+ psrc->c[src_order::R],
752
+ psrc->c[src_order::G],
753
+ psrc->c[src_order::B],
754
+ alpha);
755
+ }
756
+ }
757
+ psrc = psrc->next();
758
+ pdst = pdst->next();
759
+ }
760
+ while(--len);
761
+ }
762
+ else
763
+ {
764
+ do
765
+ {
766
+ copy_or_blend_pix(pdst, psrc->get(), cover);
767
+ psrc = psrc->next();
768
+ pdst = pdst->next();
769
+ }
770
+ while (--len);
771
+ }
772
+ }
773
+ }
774
+
775
+ //--------------------------------------------------------------------
776
+ // Blend from single color, using grayscale surface as alpha channel.
777
+ template<class SrcPixelFormatRenderer>
778
+ void blend_from_color(const SrcPixelFormatRenderer& from,
779
+ const color_type& color,
780
+ int xdst, int ydst,
781
+ int xsrc, int ysrc,
782
+ unsigned len,
783
+ int8u cover)
784
+ {
785
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
786
+ typedef typename SrcPixelFormatRenderer::color_type src_color_type;
787
+
788
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
789
+ {
790
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
791
+
792
+ do
793
+ {
794
+ copy_or_blend_pix(pdst, color, src_color_type::scale_cover(cover, psrc->c[0]));
795
+ psrc = psrc->next();
796
+ pdst = pdst->next();
797
+ }
798
+ while (--len);
799
+ }
800
+ }
801
+
802
+ //--------------------------------------------------------------------
803
+ // Blend from color table, using grayscale surface as indexes into table.
804
+ // Obviously, this only works for integer value types.
805
+ template<class SrcPixelFormatRenderer>
806
+ void blend_from_lut(const SrcPixelFormatRenderer& from,
807
+ const color_type* color_lut,
808
+ int xdst, int ydst,
809
+ int xsrc, int ysrc,
810
+ unsigned len,
811
+ int8u cover)
812
+ {
813
+ typedef typename SrcPixelFormatRenderer::pixel_type src_pixel_type;
814
+
815
+ if (const src_pixel_type* psrc = from.pix_value_ptr(xsrc, ysrc))
816
+ {
817
+ pixel_type* pdst = pix_value_ptr(xdst, ydst, len);
818
+
819
+ if (cover == cover_mask)
820
+ {
821
+ do
822
+ {
823
+ const color_type& color = color_lut[psrc->c[0]];
824
+ blend_pix(pdst, color);
825
+ psrc = psrc->next();
826
+ pdst = pdst->next();
827
+ }
828
+ while(--len);
829
+ }
830
+ else
831
+ {
832
+ do
833
+ {
834
+ copy_or_blend_pix(pdst, color_lut[psrc->c[0]], cover);
835
+ psrc = psrc->next();
836
+ pdst = pdst->next();
837
+ }
838
+ while(--len);
839
+ }
840
+ }
841
+ }
842
+
843
+ private:
844
+ rbuf_type* m_rbuf;
845
+ Blender m_blender;
846
+ };
847
+
848
+ //-----------------------------------------------------------------------
849
+ typedef blender_rgb<rgba8, order_rgb> blender_rgb24;
850
+ typedef blender_rgb<rgba8, order_bgr> blender_bgr24;
851
+ typedef blender_rgb<srgba8, order_rgb> blender_srgb24;
852
+ typedef blender_rgb<srgba8, order_bgr> blender_sbgr24;
853
+ typedef blender_rgb<rgba16, order_rgb> blender_rgb48;
854
+ typedef blender_rgb<rgba16, order_bgr> blender_bgr48;
855
+ typedef blender_rgb<rgba32, order_rgb> blender_rgb96;
856
+ typedef blender_rgb<rgba32, order_bgr> blender_bgr96;
857
+
858
+ typedef blender_rgb_pre<rgba8, order_rgb> blender_rgb24_pre;
859
+ typedef blender_rgb_pre<rgba8, order_bgr> blender_bgr24_pre;
860
+ typedef blender_rgb_pre<srgba8, order_rgb> blender_srgb24_pre;
861
+ typedef blender_rgb_pre<srgba8, order_bgr> blender_sbgr24_pre;
862
+ typedef blender_rgb_pre<rgba16, order_rgb> blender_rgb48_pre;
863
+ typedef blender_rgb_pre<rgba16, order_bgr> blender_bgr48_pre;
864
+ typedef blender_rgb_pre<rgba32, order_rgb> blender_rgb96_pre;
865
+ typedef blender_rgb_pre<rgba32, order_bgr> blender_bgr96_pre;
866
+
867
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 3> pixfmt_rgb24;
868
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 3> pixfmt_bgr24;
869
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 3> pixfmt_srgb24;
870
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 3> pixfmt_sbgr24;
871
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 3> pixfmt_rgb48;
872
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 3> pixfmt_bgr48;
873
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 3> pixfmt_rgb96;
874
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 3> pixfmt_bgr96;
875
+
876
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 3> pixfmt_rgb24_pre;
877
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 3> pixfmt_bgr24_pre;
878
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 3> pixfmt_srgb24_pre;
879
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 3> pixfmt_sbgr24_pre;
880
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 3> pixfmt_rgb48_pre;
881
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 3> pixfmt_bgr48_pre;
882
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 3> pixfmt_rgb96_pre;
883
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 3> pixfmt_bgr96_pre;
884
+
885
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 4, 0> pixfmt_rgbx32;
886
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24, rendering_buffer, 4, 1> pixfmt_xrgb32;
887
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 4, 1> pixfmt_xbgr32;
888
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24, rendering_buffer, 4, 0> pixfmt_bgrx32;
889
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 4, 0> pixfmt_srgbx32;
890
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24, rendering_buffer, 4, 1> pixfmt_sxrgb32;
891
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 4, 1> pixfmt_sxbgr32;
892
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24, rendering_buffer, 4, 0> pixfmt_sbgrx32;
893
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 4, 0> pixfmt_rgbx64;
894
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48, rendering_buffer, 4, 1> pixfmt_xrgb64;
895
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 4, 1> pixfmt_xbgr64;
896
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48, rendering_buffer, 4, 0> pixfmt_bgrx64;
897
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 4, 0> pixfmt_rgbx128;
898
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96, rendering_buffer, 4, 1> pixfmt_xrgb128;
899
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 4, 1> pixfmt_xbgr128;
900
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96, rendering_buffer, 4, 0> pixfmt_bgrx128;
901
+
902
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 4, 0> pixfmt_rgbx32_pre;
903
+ typedef pixfmt_alpha_blend_rgb<blender_rgb24_pre, rendering_buffer, 4, 1> pixfmt_xrgb32_pre;
904
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 4, 1> pixfmt_xbgr32_pre;
905
+ typedef pixfmt_alpha_blend_rgb<blender_bgr24_pre, rendering_buffer, 4, 0> pixfmt_bgrx32_pre;
906
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 4, 0> pixfmt_srgbx32_pre;
907
+ typedef pixfmt_alpha_blend_rgb<blender_srgb24_pre, rendering_buffer, 4, 1> pixfmt_sxrgb32_pre;
908
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 4, 1> pixfmt_sxbgr32_pre;
909
+ typedef pixfmt_alpha_blend_rgb<blender_sbgr24_pre, rendering_buffer, 4, 0> pixfmt_sbgrx32_pre;
910
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 4, 0> pixfmt_rgbx64_pre;
911
+ typedef pixfmt_alpha_blend_rgb<blender_rgb48_pre, rendering_buffer, 4, 1> pixfmt_xrgb64_pre;
912
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 4, 1> pixfmt_xbgr64_pre;
913
+ typedef pixfmt_alpha_blend_rgb<blender_bgr48_pre, rendering_buffer, 4, 0> pixfmt_bgrx64_pre;
914
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 4, 0> pixfmt_rgbx128_pre;
915
+ typedef pixfmt_alpha_blend_rgb<blender_rgb96_pre, rendering_buffer, 4, 1> pixfmt_xrgb128_pre;
916
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 4, 1> pixfmt_xbgr128_pre;
917
+ typedef pixfmt_alpha_blend_rgb<blender_bgr96_pre, rendering_buffer, 4, 0> pixfmt_bgrx128_pre;
918
+
919
+
920
+ //-----------------------------------------------------pixfmt_rgb24_gamma
921
+ template<class Gamma> class pixfmt_rgb24_gamma :
922
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_rgb, Gamma>, rendering_buffer, 3>
923
+ {
924
+ public:
925
+ pixfmt_rgb24_gamma(rendering_buffer& rb, const Gamma& g) :
926
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_rgb, Gamma>, rendering_buffer, 3>(rb)
927
+ {
928
+ this->blender().gamma(g);
929
+ }
930
+ };
931
+
932
+ //-----------------------------------------------------pixfmt_srgb24_gamma
933
+ template<class Gamma> class pixfmt_srgb24_gamma :
934
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_rgb, Gamma>, rendering_buffer, 3>
935
+ {
936
+ public:
937
+ pixfmt_srgb24_gamma(rendering_buffer& rb, const Gamma& g) :
938
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_rgb, Gamma>, rendering_buffer, 3>(rb)
939
+ {
940
+ this->blender().gamma(g);
941
+ }
942
+ };
943
+
944
+ //-----------------------------------------------------pixfmt_bgr24_gamma
945
+ template<class Gamma> class pixfmt_bgr24_gamma :
946
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_bgr, Gamma>, rendering_buffer, 3>
947
+ {
948
+ public:
949
+ pixfmt_bgr24_gamma(rendering_buffer& rb, const Gamma& g) :
950
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba8, order_bgr, Gamma>, rendering_buffer, 3>(rb)
951
+ {
952
+ this->blender().gamma(g);
953
+ }
954
+ };
955
+
956
+ //-----------------------------------------------------pixfmt_sbgr24_gamma
957
+ template<class Gamma> class pixfmt_sbgr24_gamma :
958
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_bgr, Gamma>, rendering_buffer, 3>
959
+ {
960
+ public:
961
+ pixfmt_sbgr24_gamma(rendering_buffer& rb, const Gamma& g) :
962
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<srgba8, order_bgr, Gamma>, rendering_buffer, 3>(rb)
963
+ {
964
+ this->blender().gamma(g);
965
+ }
966
+ };
967
+
968
+ //-----------------------------------------------------pixfmt_rgb48_gamma
969
+ template<class Gamma> class pixfmt_rgb48_gamma :
970
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_rgb, Gamma>, rendering_buffer, 3>
971
+ {
972
+ public:
973
+ pixfmt_rgb48_gamma(rendering_buffer& rb, const Gamma& g) :
974
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_rgb, Gamma>, rendering_buffer, 3>(rb)
975
+ {
976
+ this->blender().gamma(g);
977
+ }
978
+ };
979
+
980
+ //-----------------------------------------------------pixfmt_bgr48_gamma
981
+ template<class Gamma> class pixfmt_bgr48_gamma :
982
+ public pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_bgr, Gamma>, rendering_buffer, 3>
983
+ {
984
+ public:
985
+ pixfmt_bgr48_gamma(rendering_buffer& rb, const Gamma& g) :
986
+ pixfmt_alpha_blend_rgb<blender_rgb_gamma<rgba16, order_bgr, Gamma>, rendering_buffer, 3>(rb)
987
+ {
988
+ this->blender().gamma(g);
989
+ }
990
+ };
991
+
992
+ }
993
+
994
+ #endif
995
+
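Editorial note, not part of the diff: the *_gamma pixel formats defined at the end of agg_pixfmt_rgb.h above differ from the plain formats only in that their blender routes every destination read and write through a user-supplied gamma object. A minimal, hedged usage sketch (illustration only; it assumes agg_gamma_lut.h from this same bundled copy of AGG):

#include <vector>
#include "agg_pixfmt_rgb.h"
#include "agg_gamma_lut.h"

void demo_rgb24_gamma()
{
    const unsigned w = 64, h = 64;
    std::vector<agg::int8u> buf(w * h * 3, 255);               // plain RGB24 canvas
    agg::rendering_buffer rbuf(buf.data(), w, h, int(w * 3));

    agg::gamma_lut<> lut(2.2);                                 // dir()/inv() lookup tables
    agg::pixfmt_rgb24_gamma<agg::gamma_lut<> > pixf(rbuf, lut);

    // blender_rgb_gamma linearizes via lut.dir(), blends, then re-encodes via lut.inv().
    pixf.blend_pixel(10, 10, agg::rgba8(255, 0, 0), 128);
}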
data/bundled_deps/agg/agg/agg_rasterizer_cells_aa.h ADDED
@@ -0,0 +1,741 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ //
12
+ // The author gratefully acknowledges the support of David Turner,
13
+ // Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
14
+ // library - in producing this work. See http://www.freetype.org for details.
15
+ //
16
+ //----------------------------------------------------------------------------
17
+ // Contact: [email protected]
18
19
+ // http://www.antigrain.com
20
+ //----------------------------------------------------------------------------
21
+ //
22
+ // Adaptation for 32-bit screen coordinates has been sponsored by
23
+ // Liberty Technology Systems, Inc., visit http://lib-sys.com
24
+ //
25
+ // Liberty Technology Systems, Inc. is the provider of
26
+ // PostScript and PDF technology for software developers.
27
+ //
28
+ //----------------------------------------------------------------------------
29
+ #ifndef AGG_RASTERIZER_CELLS_AA_INCLUDED
30
+ #define AGG_RASTERIZER_CELLS_AA_INCLUDED
31
+
32
+ #include <string.h>
33
+ #include <cstdlib>
34
+ #include <limits>
35
+ #include "agg_math.h"
36
+ #include "agg_array.h"
37
+
38
+
39
+ namespace agg
40
+ {
41
+
42
+ //-----------------------------------------------------rasterizer_cells_aa
43
+ // An internal class that implements the main rasterization algorithm.
44
+ // Used in the rasterizer. Should not be used directly.
45
+ template<class Cell> class rasterizer_cells_aa
46
+ {
47
+ enum cell_block_scale_e
48
+ {
49
+ cell_block_shift = 12,
50
+ cell_block_size = 1 << cell_block_shift,
51
+ cell_block_mask = cell_block_size - 1,
52
+ cell_block_pool = 256,
53
+ cell_block_limit = 1024
54
+ };
55
+
56
+ struct sorted_y
57
+ {
58
+ unsigned start;
59
+ unsigned num;
60
+ };
61
+
62
+ public:
63
+ typedef Cell cell_type;
64
+ typedef rasterizer_cells_aa<Cell> self_type;
65
+
66
+ ~rasterizer_cells_aa();
67
+ rasterizer_cells_aa();
68
+
69
+ void reset();
70
+ void style(const cell_type& style_cell);
71
+ void line(int x1, int y1, int x2, int y2);
72
+
73
+ int min_x() const { return m_min_x; }
74
+ int min_y() const { return m_min_y; }
75
+ int max_x() const { return m_max_x; }
76
+ int max_y() const { return m_max_y; }
77
+
78
+ void sort_cells();
79
+
80
+ unsigned total_cells() const
81
+ {
82
+ return m_num_cells;
83
+ }
84
+
85
+ unsigned scanline_num_cells(unsigned y) const
86
+ {
87
+ return m_sorted_y[y - m_min_y].num;
88
+ }
89
+
90
+ const cell_type* const* scanline_cells(unsigned y) const
91
+ {
92
+ return m_sorted_cells.data() + m_sorted_y[y - m_min_y].start;
93
+ }
94
+
95
+ bool sorted() const { return m_sorted; }
96
+
97
+ private:
98
+ rasterizer_cells_aa(const self_type&);
99
+ const self_type& operator = (const self_type&);
100
+
101
+ void set_curr_cell(int x, int y);
102
+ void add_curr_cell();
103
+ void render_hline(int ey, int x1, int y1, int x2, int y2);
104
+ void allocate_block();
105
+
106
+ private:
107
+ unsigned m_num_blocks;
108
+ unsigned m_max_blocks;
109
+ unsigned m_curr_block;
110
+ unsigned m_num_cells;
111
+ cell_type** m_cells;
112
+ cell_type* m_curr_cell_ptr;
113
+ pod_vector<cell_type*> m_sorted_cells;
114
+ pod_vector<sorted_y> m_sorted_y;
115
+ cell_type m_curr_cell;
116
+ cell_type m_style_cell;
117
+ int m_min_x;
118
+ int m_min_y;
119
+ int m_max_x;
120
+ int m_max_y;
121
+ bool m_sorted;
122
+ };
123
+
124
+
125
+
126
+
127
+ //------------------------------------------------------------------------
128
+ template<class Cell>
129
+ rasterizer_cells_aa<Cell>::~rasterizer_cells_aa()
130
+ {
131
+ if(m_num_blocks)
132
+ {
133
+ cell_type** ptr = m_cells + m_num_blocks - 1;
134
+ while(m_num_blocks--)
135
+ {
136
+ pod_allocator<cell_type>::deallocate(*ptr, cell_block_size);
137
+ ptr--;
138
+ }
139
+ pod_allocator<cell_type*>::deallocate(m_cells, m_max_blocks);
140
+ }
141
+ }
142
+
143
+ //------------------------------------------------------------------------
144
+ template<class Cell>
145
+ rasterizer_cells_aa<Cell>::rasterizer_cells_aa() :
146
+ m_num_blocks(0),
147
+ m_max_blocks(0),
148
+ m_curr_block(0),
149
+ m_num_cells(0),
150
+ m_cells(0),
151
+ m_curr_cell_ptr(0),
152
+ m_sorted_cells(),
153
+ m_sorted_y(),
154
+ m_min_x(std::numeric_limits<int>::max()),
155
+ m_min_y(std::numeric_limits<int>::max()),
156
+ m_max_x(std::numeric_limits<int>::min()),
157
+ m_max_y(std::numeric_limits<int>::min()),
158
+ m_sorted(false)
159
+ {
160
+ m_style_cell.initial();
161
+ m_curr_cell.initial();
162
+ }
163
+
164
+ //------------------------------------------------------------------------
165
+ template<class Cell>
166
+ void rasterizer_cells_aa<Cell>::reset()
167
+ {
168
+ m_num_cells = 0;
169
+ m_curr_block = 0;
170
+ m_curr_cell.initial();
171
+ m_style_cell.initial();
172
+ m_sorted = false;
173
+ m_min_x = std::numeric_limits<int>::max();
174
+ m_min_y = std::numeric_limits<int>::max();
175
+ m_max_x = std::numeric_limits<int>::min();
176
+ m_max_y = std::numeric_limits<int>::min();
177
+ }
178
+
179
+ //------------------------------------------------------------------------
180
+ template<class Cell>
181
+ AGG_INLINE void rasterizer_cells_aa<Cell>::add_curr_cell()
182
+ {
183
+ if(m_curr_cell.area | m_curr_cell.cover)
184
+ {
185
+ if((m_num_cells & cell_block_mask) == 0)
186
+ {
187
+ if(m_num_blocks >= cell_block_limit) return;
188
+ allocate_block();
189
+ }
190
+ *m_curr_cell_ptr++ = m_curr_cell;
191
+ ++m_num_cells;
192
+ }
193
+ }
194
+
195
+ //------------------------------------------------------------------------
196
+ template<class Cell>
197
+ AGG_INLINE void rasterizer_cells_aa<Cell>::set_curr_cell(int x, int y)
198
+ {
199
+ if(m_curr_cell.not_equal(x, y, m_style_cell))
200
+ {
201
+ add_curr_cell();
202
+ m_curr_cell.style(m_style_cell);
203
+ m_curr_cell.x = x;
204
+ m_curr_cell.y = y;
205
+ m_curr_cell.cover = 0;
206
+ m_curr_cell.area = 0;
207
+ }
208
+ }
209
+
210
+ //------------------------------------------------------------------------
211
+ template<class Cell>
212
+ AGG_INLINE void rasterizer_cells_aa<Cell>::render_hline(int ey,
213
+ int x1, int y1,
214
+ int x2, int y2)
215
+ {
216
+ int ex1 = x1 >> poly_subpixel_shift;
217
+ int ex2 = x2 >> poly_subpixel_shift;
218
+ int fx1 = x1 & poly_subpixel_mask;
219
+ int fx2 = x2 & poly_subpixel_mask;
220
+
221
+ int delta, p, first;
222
+ long long dx;
223
+ int incr, lift, mod, rem;
224
+
225
+ //trivial case. Happens often
226
+ if(y1 == y2)
227
+ {
228
+ set_curr_cell(ex2, ey);
229
+ return;
230
+ }
231
+
232
+ //everything is located in a single cell. That is easy!
233
+ if(ex1 == ex2)
234
+ {
235
+ delta = y2 - y1;
236
+ m_curr_cell.cover += delta;
237
+ m_curr_cell.area += (fx1 + fx2) * delta;
238
+ return;
239
+ }
240
+
241
+ //ok, we'll have to render a run of adjacent cells on the same
242
+ //hline...
243
+ p = (poly_subpixel_scale - fx1) * (y2 - y1);
244
+ first = poly_subpixel_scale;
245
+ incr = 1;
246
+
247
+ dx = (long long)x2 - (long long)x1;
248
+
249
+ if(dx < 0)
250
+ {
251
+ p = fx1 * (y2 - y1);
252
+ first = 0;
253
+ incr = -1;
254
+ dx = -dx;
255
+ }
256
+
257
+ delta = (int)(p / dx);
258
+ mod = (int)(p % dx);
259
+
260
+ if(mod < 0)
261
+ {
262
+ delta--;
263
+ mod += static_cast<int>(dx);
264
+ }
265
+
266
+ m_curr_cell.cover += delta;
267
+ m_curr_cell.area += (fx1 + first) * delta;
268
+
269
+ ex1 += incr;
270
+ set_curr_cell(ex1, ey);
271
+ y1 += delta;
272
+
273
+ if(ex1 != ex2)
274
+ {
275
+ p = poly_subpixel_scale * (y2 - y1 + delta);
276
+ lift = (int)(p / dx);
277
+ rem = (int)(p % dx);
278
+
279
+ if (rem < 0)
280
+ {
281
+ lift--;
282
+ rem += static_cast<int>(dx);
283
+ }
284
+
285
+ mod -= static_cast<int>(dx);
286
+
287
+ while (ex1 != ex2)
288
+ {
289
+ delta = lift;
290
+ mod += rem;
291
+ if(mod >= 0)
292
+ {
293
+ mod -= static_cast<int>(dx);
294
+ delta++;
295
+ }
296
+
297
+ m_curr_cell.cover += delta;
298
+ m_curr_cell.area += poly_subpixel_scale * delta;
299
+ y1 += delta;
300
+ ex1 += incr;
301
+ set_curr_cell(ex1, ey);
302
+ }
303
+ }
304
+ delta = y2 - y1;
305
+ m_curr_cell.cover += delta;
306
+ m_curr_cell.area += (fx2 + poly_subpixel_scale - first) * delta;
307
+ }
308
+
309
+ //------------------------------------------------------------------------
310
+ template<class Cell>
311
+ AGG_INLINE void rasterizer_cells_aa<Cell>::style(const cell_type& style_cell)
312
+ {
313
+ m_style_cell.style(style_cell);
314
+ }
315
+
316
+ //------------------------------------------------------------------------
317
+ template<class Cell>
318
+ void rasterizer_cells_aa<Cell>::line(int x1, int y1, int x2, int y2)
319
+ {
320
+ enum dx_limit_e { dx_limit = 16384 << poly_subpixel_shift };
321
+
322
+ long long dx = (long long)x2 - (long long)x1;
323
+
324
+ if(dx >= dx_limit || dx <= -dx_limit)
325
+ {
326
+ int cx = (int)(((long long)x1 + (long long)x2) >> 1);
327
+ int cy = (int)(((long long)y1 + (long long)y2) >> 1);
328
+ line(x1, y1, cx, cy);
329
+ line(cx, cy, x2, y2);
330
+ }
331
+
332
+ long long dy = (long long)y2 - (long long)y1;
333
+ int ex1 = x1 >> poly_subpixel_shift;
334
+ int ex2 = x2 >> poly_subpixel_shift;
335
+ int ey1 = y1 >> poly_subpixel_shift;
336
+ int ey2 = y2 >> poly_subpixel_shift;
337
+ int fy1 = y1 & poly_subpixel_mask;
338
+ int fy2 = y2 & poly_subpixel_mask;
339
+
340
+ int x_from, x_to;
341
+ int rem, mod, lift, delta, first, incr;
342
+ long long p;
343
+
344
+ if(ex1 < m_min_x) m_min_x = ex1;
345
+ if(ex1 > m_max_x) m_max_x = ex1;
346
+ if(ey1 < m_min_y) m_min_y = ey1;
347
+ if(ey1 > m_max_y) m_max_y = ey1;
348
+ if(ex2 < m_min_x) m_min_x = ex2;
349
+ if(ex2 > m_max_x) m_max_x = ex2;
350
+ if(ey2 < m_min_y) m_min_y = ey2;
351
+ if(ey2 > m_max_y) m_max_y = ey2;
352
+
353
+ set_curr_cell(ex1, ey1);
354
+
355
+ //everything is on a single hline
356
+ if(ey1 == ey2)
357
+ {
358
+ render_hline(ey1, x1, fy1, x2, fy2);
359
+ return;
360
+ }
361
+
362
+ //Vertical line - we have to calculate start and end cells,
363
+ //and then - the common values of the area and coverage for
364
+ //all cells of the line. We know exactly there's only one
365
+ //cell, so, we don't have to call render_hline().
366
+ incr = 1;
367
+ if(dx == 0)
368
+ {
369
+ int ex = x1 >> poly_subpixel_shift;
370
+ int two_fx = (x1 - (ex << poly_subpixel_shift)) << 1;
371
+ int area;
372
+
373
+ first = poly_subpixel_scale;
374
+ if(dy < 0)
375
+ {
376
+ first = 0;
377
+ incr = -1;
378
+ }
379
+
380
+ x_from = x1;
381
+
382
+ //render_hline(ey1, x_from, fy1, x_from, first);
383
+ delta = first - fy1;
384
+ m_curr_cell.cover += delta;
385
+ m_curr_cell.area += two_fx * delta;
386
+
387
+ ey1 += incr;
388
+ set_curr_cell(ex, ey1);
389
+
390
+ delta = first + first - poly_subpixel_scale;
391
+ area = two_fx * delta;
392
+ while(ey1 != ey2)
393
+ {
394
+ //render_hline(ey1, x_from, poly_subpixel_scale - first, x_from, first);
395
+ m_curr_cell.cover = delta;
396
+ m_curr_cell.area = area;
397
+ ey1 += incr;
398
+ set_curr_cell(ex, ey1);
399
+ }
400
+ //render_hline(ey1, x_from, poly_subpixel_scale - first, x_from, fy2);
401
+ delta = fy2 - poly_subpixel_scale + first;
402
+ m_curr_cell.cover += delta;
403
+ m_curr_cell.area += two_fx * delta;
404
+ return;
405
+ }
406
+
407
+ //ok, we have to render several hlines
408
+ p = (poly_subpixel_scale - fy1) * dx;
409
+ first = poly_subpixel_scale;
410
+
411
+ if(dy < 0)
412
+ {
413
+ p = fy1 * dx;
414
+ first = 0;
415
+ incr = -1;
416
+ dy = -dy;
417
+ }
418
+
419
+ delta = (int)(p / dy);
420
+ mod = (int)(p % dy);
421
+
422
+ if(mod < 0)
423
+ {
424
+ delta--;
425
+ mod += static_cast<int>(dy);
426
+ }
427
+
428
+ x_from = x1 + delta;
429
+ render_hline(ey1, x1, fy1, x_from, first);
430
+
431
+ ey1 += incr;
432
+ set_curr_cell(x_from >> poly_subpixel_shift, ey1);
433
+
434
+ if(ey1 != ey2)
435
+ {
436
+ p = poly_subpixel_scale * dx;
437
+ lift = (int)(p / dy);
438
+ rem = (int)(p % dy);
439
+
440
+ if(rem < 0)
441
+ {
442
+ lift--;
443
+ rem += static_cast<int>(dy);
444
+ }
445
+ mod -= static_cast<int>(dy);
446
+
447
+ while(ey1 != ey2)
448
+ {
449
+ delta = lift;
450
+ mod += rem;
451
+ if (mod >= 0)
452
+ {
453
+ mod -= static_cast<int>(dy);
454
+ delta++;
455
+ }
456
+
457
+ x_to = x_from + delta;
458
+ render_hline(ey1, x_from, poly_subpixel_scale - first, x_to, first);
459
+ x_from = x_to;
460
+
461
+ ey1 += incr;
462
+ set_curr_cell(x_from >> poly_subpixel_shift, ey1);
463
+ }
464
+ }
465
+ render_hline(ey1, x_from, poly_subpixel_scale - first, x2, fy2);
466
+ }
467
+
468
+ //------------------------------------------------------------------------
469
+ template<class Cell>
470
+ void rasterizer_cells_aa<Cell>::allocate_block()
471
+ {
472
+ if(m_curr_block >= m_num_blocks)
473
+ {
474
+ if(m_num_blocks >= m_max_blocks)
475
+ {
476
+ cell_type** new_cells =
477
+ pod_allocator<cell_type*>::allocate(m_max_blocks +
478
+ cell_block_pool);
479
+
480
+ if(m_cells)
481
+ {
482
+ memcpy(new_cells, m_cells, m_max_blocks * sizeof(cell_type*));
483
+ pod_allocator<cell_type*>::deallocate(m_cells, m_max_blocks);
484
+ }
485
+ m_cells = new_cells;
486
+ m_max_blocks += cell_block_pool;
487
+ }
488
+
489
+ m_cells[m_num_blocks++] =
490
+ pod_allocator<cell_type>::allocate(cell_block_size);
491
+
492
+ }
493
+ m_curr_cell_ptr = m_cells[m_curr_block++];
494
+ }
495
+
496
+
497
+
498
+ //------------------------------------------------------------------------
499
+ template <class T> static AGG_INLINE void swap_cells(T* a, T* b)
500
+ {
501
+ T temp = *a;
502
+ *a = *b;
503
+ *b = temp;
504
+ }
505
+
506
+
507
+ //------------------------------------------------------------------------
508
+ enum
509
+ {
510
+ qsort_threshold = 9
511
+ };
512
+
513
+
514
+ //------------------------------------------------------------------------
515
+ template<class Cell>
516
+ void qsort_cells(Cell** start, unsigned num)
517
+ {
518
+ Cell** stack[80];
519
+ Cell*** top;
520
+ Cell** limit;
521
+ Cell** base;
522
+
523
+ limit = start + num;
524
+ base = start;
525
+ top = stack;
526
+
527
+ for (;;)
528
+ {
529
+ int len = int(limit - base);
530
+
531
+ Cell** i;
532
+ Cell** j;
533
+ Cell** pivot;
534
+
535
+ if(len > qsort_threshold)
536
+ {
537
+ // we use base + len/2 as the pivot
538
+ pivot = base + len / 2;
539
+ swap_cells(base, pivot);
540
+
541
+ i = base + 1;
542
+ j = limit - 1;
543
+
544
+ // now ensure that *i <= *base <= *j
545
+ if((*j)->x < (*i)->x)
546
+ {
547
+ swap_cells(i, j);
548
+ }
549
+
550
+ if((*base)->x < (*i)->x)
551
+ {
552
+ swap_cells(base, i);
553
+ }
554
+
555
+ if((*j)->x < (*base)->x)
556
+ {
557
+ swap_cells(base, j);
558
+ }
559
+
560
+ for(;;)
561
+ {
562
+ int x = (*base)->x;
563
+ do i++; while( (*i)->x < x );
564
+ do j--; while( x < (*j)->x );
565
+
566
+ if(i > j)
567
+ {
568
+ break;
569
+ }
570
+
571
+ swap_cells(i, j);
572
+ }
573
+
574
+ swap_cells(base, j);
575
+
576
+ // now, push the largest sub-array
577
+ if(j - base > limit - i)
578
+ {
579
+ top[0] = base;
580
+ top[1] = j;
581
+ base = i;
582
+ }
583
+ else
584
+ {
585
+ top[0] = i;
586
+ top[1] = limit;
587
+ limit = j;
588
+ }
589
+ top += 2;
590
+ }
591
+ else
592
+ {
593
+ // the sub-array is small, perform insertion sort
594
+ j = base;
595
+ i = j + 1;
596
+
597
+ for(; i < limit; j = i, i++)
598
+ {
599
+ for(; j[1]->x < (*j)->x; j--)
600
+ {
601
+ swap_cells(j + 1, j);
602
+ if (j == base)
603
+ {
604
+ break;
605
+ }
606
+ }
607
+ }
608
+
609
+ if(top > stack)
610
+ {
611
+ top -= 2;
612
+ base = top[0];
613
+ limit = top[1];
614
+ }
615
+ else
616
+ {
617
+ break;
618
+ }
619
+ }
620
+ }
621
+ }
622
+
623
+
624
+ //------------------------------------------------------------------------
625
+ template<class Cell>
626
+ void rasterizer_cells_aa<Cell>::sort_cells()
627
+ {
628
+ if(m_sorted) return; //Perform sort only the first time.
629
+
630
+ add_curr_cell();
631
+ m_curr_cell.x = std::numeric_limits<int>::max();
632
+ m_curr_cell.y = std::numeric_limits<int>::max();
633
+ m_curr_cell.cover = 0;
634
+ m_curr_cell.area = 0;
635
+
636
+ if(m_num_cells == 0) return;
637
+
638
+ // DBG: Check to see if min/max works well.
639
+ //for(unsigned nc = 0; nc < m_num_cells; nc++)
640
+ //{
641
+ // cell_type* cell = m_cells[nc >> cell_block_shift] + (nc & cell_block_mask);
642
+ // if(cell->x < m_min_x ||
643
+ // cell->y < m_min_y ||
644
+ // cell->x > m_max_x ||
645
+ // cell->y > m_max_y)
646
+ // {
647
+ // cell = cell; // Breakpoint here
648
+ // }
649
+ //}
650
+ // Allocate the array of cell pointers
651
+ m_sorted_cells.allocate(m_num_cells, 16);
652
+
653
+ // Allocate and zero the Y array
654
+ m_sorted_y.allocate(m_max_y - m_min_y + 1, 16);
655
+ m_sorted_y.zero();
656
+
657
+ // Create the Y-histogram (count the numbers of cells for each Y)
658
+ cell_type** block_ptr = m_cells;
659
+ cell_type* cell_ptr;
660
+ unsigned nb = m_num_cells;
661
+ unsigned i;
662
+ while(nb)
663
+ {
664
+ cell_ptr = *block_ptr++;
665
+ i = (nb > cell_block_size) ? unsigned(cell_block_size) : nb;
666
+ nb -= i;
667
+ while(i--)
668
+ {
669
+ m_sorted_y[cell_ptr->y - m_min_y].start++;
670
+ ++cell_ptr;
671
+ }
672
+ }
673
+
674
+ // Convert the Y-histogram into the array of starting indexes
675
+ unsigned start = 0;
676
+ for(i = 0; i < m_sorted_y.size(); i++)
677
+ {
678
+ unsigned v = m_sorted_y[i].start;
679
+ m_sorted_y[i].start = start;
680
+ start += v;
681
+ }
682
+
683
+ // Fill the cell pointer array sorted by Y
684
+ block_ptr = m_cells;
685
+ nb = m_num_cells;
686
+ while(nb)
687
+ {
688
+ cell_ptr = *block_ptr++;
689
+ i = (nb > cell_block_size) ? unsigned(cell_block_size) : nb;
690
+ nb -= i;
691
+ while(i--)
692
+ {
693
+ sorted_y& curr_y = m_sorted_y[cell_ptr->y - m_min_y];
694
+ m_sorted_cells[curr_y.start + curr_y.num] = cell_ptr;
695
+ ++curr_y.num;
696
+ ++cell_ptr;
697
+ }
698
+ }
699
+
700
+ // Finally arrange the X-arrays
701
+ for(i = 0; i < m_sorted_y.size(); i++)
702
+ {
703
+ const sorted_y& curr_y = m_sorted_y[i];
704
+ if(curr_y.num)
705
+ {
706
+ qsort_cells(m_sorted_cells.data() + curr_y.start, curr_y.num);
707
+ }
708
+ }
709
+ m_sorted = true;
710
+ }
711
+
712
+
713
+
714
+ //------------------------------------------------------scanline_hit_test
715
+ class scanline_hit_test
716
+ {
717
+ public:
718
+ scanline_hit_test(int x) : m_x(x), m_hit(false) {}
719
+
720
+ void reset_spans() {}
721
+ void finalize(int) {}
722
+ void add_cell(int x, int)
723
+ {
724
+ if(m_x == x) m_hit = true;
725
+ }
726
+ void add_span(int x, int len, int)
727
+ {
728
+ if(m_x >= x && m_x < x+len) m_hit = true;
729
+ }
730
+ unsigned num_spans() const { return 1; }
731
+ bool hit() const { return m_hit; }
732
+
733
+ private:
734
+ int m_x;
735
+ bool m_hit;
736
+ };
737
+
738
+
739
+ }
740
+
741
+ #endif
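Editorial note, not part of the diff: rasterizer_cells_aa is the internal cell accumulator; the scanline rasterizers added below drive it through line() and then walk the sorted cells. A rough, hedged sketch of that consumption pattern, assuming the cell type is agg::cell_aa from agg_rasterizer_scanline_aa_nogamma.h:

#include "agg_rasterizer_cells_aa.h"
#include "agg_rasterizer_scanline_aa_nogamma.h"   // defines agg::cell_aa

// 'cells' is assumed to have been filled with line() calls in 24.8 coordinates.
void walk_cells(agg::rasterizer_cells_aa<agg::cell_aa>& cells)
{
    cells.sort_cells();                            // bucket by Y, then qsort by X
    for(int y = cells.min_y(); y <= cells.max_y(); ++y)
    {
        unsigned n = cells.scanline_num_cells(y);
        const agg::cell_aa* const* c = cells.scanline_cells(y);
        int cover = 0;
        while(n--)
        {
            cover += (*c)->cover;                  // running winding cover along the hline
            // (*c)->area carries the partial-pixel contribution at column (*c)->x
            ++c;
        }
        (void)cover;
    }
}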
data/bundled_deps/agg/agg/agg_rasterizer_scanline_aa.h ADDED
@@ -0,0 +1,481 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ //
12
+ // The author gratefully acknowledges the support of David Turner,
13
+ // Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
14
+ // library - in producing this work. See http://www.freetype.org for details.
15
+ //
16
+ //----------------------------------------------------------------------------
17
+ // Contact: [email protected]
18
19
+ // http://www.antigrain.com
20
+ //----------------------------------------------------------------------------
21
+ //
22
+ // Adaptation for 32-bit screen coordinates has been sponsored by
23
+ // Liberty Technology Systems, Inc., visit http://lib-sys.com
24
+ //
25
+ // Liberty Technology Systems, Inc. is the provider of
26
+ // PostScript and PDF technology for software developers.
27
+ //
28
+ //----------------------------------------------------------------------------
29
+ #ifndef AGG_RASTERIZER_SCANLINE_AA_INCLUDED
30
+ #define AGG_RASTERIZER_SCANLINE_AA_INCLUDED
31
+
32
+ #include "agg_rasterizer_cells_aa.h"
33
+ #include "agg_rasterizer_sl_clip.h"
34
+ #include "agg_rasterizer_scanline_aa_nogamma.h"
35
+ #include "agg_gamma_functions.h"
36
+
37
+
38
+ namespace agg
39
+ {
40
+ //==================================================rasterizer_scanline_aa
41
+ // Polygon rasterizer that is used to render filled polygons with
42
+ // high-quality Anti-Aliasing. Internally, by default, the class uses
43
+ // integer coordinates in format 24.8, i.e. 24 bits for integer part
44
+ // and 8 bits for fractional - see poly_subpixel_shift. This class can be
45
+ // used in the following way:
46
+ //
47
+ // 1. filling_rule(filling_rule_e ft) - optional.
48
+ //
49
+ // 2. gamma() - optional.
50
+ //
51
+ // 3. reset()
52
+ //
53
+ // 4. move_to(x, y) / line_to(x, y) - make the polygon. One can create
54
+ // more than one contour, but each contour must consist of at least 3
55
+ // vertices, i.e. move_to(x1, y1); line_to(x2, y2); line_to(x3, y3);
56
+ // is the absolute minimum of vertices that define a triangle.
57
+ // The algorithm does not check either the number of vertices or the
58
+ // coincidence of their coordinates, but in the worst case it just
59
+ // won't draw anything.
60
+ // The order of the vertices (clockwise or counterclockwise)
61
+ // is important when using the non-zero filling rule (fill_non_zero).
62
+ // In this case the vertex order of all the contours must be the same
63
+ // if you want your intersecting polygons to be without "holes".
64
+ // You actually can use different vertices order. If the contours do not
65
+ // intersect each other the order is not important anyway. If they do,
66
+ // contours with the same vertex order will be rendered without "holes"
67
+ // while the intersecting contours with different orders will have "holes".
68
+ //
69
+ // filling_rule() and gamma() can be called anytime before "sweeping".
70
+ //------------------------------------------------------------------------
71
+ template<class Clip=rasterizer_sl_clip_int> class rasterizer_scanline_aa
72
+ {
73
+ enum status
74
+ {
75
+ status_initial,
76
+ status_move_to,
77
+ status_line_to,
78
+ status_closed
79
+ };
80
+
81
+ public:
82
+ typedef Clip clip_type;
83
+ typedef typename Clip::conv_type conv_type;
84
+ typedef typename Clip::coord_type coord_type;
85
+
86
+ enum aa_scale_e
87
+ {
88
+ aa_shift = 8,
89
+ aa_scale = 1 << aa_shift,
90
+ aa_mask = aa_scale - 1,
91
+ aa_scale2 = aa_scale * 2,
92
+ aa_mask2 = aa_scale2 - 1
93
+ };
94
+
95
+ //--------------------------------------------------------------------
96
+ rasterizer_scanline_aa() :
97
+ m_outline(),
98
+ m_clipper(),
99
+ m_filling_rule(fill_non_zero),
100
+ m_auto_close(true),
101
+ m_start_x(0),
102
+ m_start_y(0),
103
+ m_status(status_initial)
104
+ {
105
+ int i;
106
+ for(i = 0; i < aa_scale; i++) m_gamma[i] = i;
107
+ }
108
+
109
+ //--------------------------------------------------------------------
110
+ template<class GammaF>
111
+ rasterizer_scanline_aa(const GammaF& gamma_function) :
112
+ m_outline(),
113
+ m_clipper(m_outline),
114
+ m_filling_rule(fill_non_zero),
115
+ m_auto_close(true),
116
+ m_start_x(0),
117
+ m_start_y(0),
118
+ m_status(status_initial)
119
+ {
120
+ gamma(gamma_function);
121
+ }
122
+
123
+ //--------------------------------------------------------------------
124
+ void reset();
125
+ void reset_clipping();
126
+ void clip_box(double x1, double y1, double x2, double y2);
127
+ void filling_rule(filling_rule_e filling_rule);
128
+ void auto_close(bool flag) { m_auto_close = flag; }
129
+
130
+ //--------------------------------------------------------------------
131
+ template<class GammaF> void gamma(const GammaF& gamma_function)
132
+ {
133
+ int i;
134
+ for(i = 0; i < aa_scale; i++)
135
+ {
136
+ m_gamma[i] = uround(gamma_function(double(i) / aa_mask) * aa_mask);
137
+ }
138
+ }
139
+
140
+ //--------------------------------------------------------------------
141
+ unsigned apply_gamma(unsigned cover) const
142
+ {
143
+ return m_gamma[cover];
144
+ }
145
+
146
+ //--------------------------------------------------------------------
147
+ void move_to(int x, int y);
148
+ void line_to(int x, int y);
149
+ void move_to_d(double x, double y);
150
+ void line_to_d(double x, double y);
151
+ void close_polygon();
152
+ void add_vertex(double x, double y, unsigned cmd);
153
+
154
+ void edge(int x1, int y1, int x2, int y2);
155
+ void edge_d(double x1, double y1, double x2, double y2);
156
+
157
+ //-------------------------------------------------------------------
158
+ template<class VertexSource>
159
+ void add_path(VertexSource &&vs, unsigned path_id=0)
160
+ {
161
+ double x;
162
+ double y;
163
+
164
+ unsigned cmd;
165
+ vs.rewind(path_id);
166
+ if(m_outline.sorted()) reset();
167
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
168
+ {
169
+ add_vertex(x, y, cmd);
170
+ }
171
+ }
172
+
173
+ //--------------------------------------------------------------------
174
+ int min_x() const { return m_outline.min_x(); }
175
+ int min_y() const { return m_outline.min_y(); }
176
+ int max_x() const { return m_outline.max_x(); }
177
+ int max_y() const { return m_outline.max_y(); }
178
+
179
+ //--------------------------------------------------------------------
180
+ void sort();
181
+ bool rewind_scanlines();
182
+ bool navigate_scanline(int y);
183
+
184
+ //--------------------------------------------------------------------
185
+ AGG_INLINE unsigned calculate_alpha(int area) const
186
+ {
187
+ int cover = area >> (poly_subpixel_shift*2 + 1 - aa_shift);
188
+
189
+ if(cover < 0) cover = -cover;
190
+ if(m_filling_rule == fill_even_odd)
191
+ {
192
+ cover &= aa_mask2;
193
+ if(cover > aa_scale)
194
+ {
195
+ cover = aa_scale2 - cover;
196
+ }
197
+ }
198
+ if(cover > aa_mask) cover = aa_mask;
199
+ return m_gamma[cover];
200
+ }
201
+
202
+ //--------------------------------------------------------------------
203
+ template<class Scanline> bool sweep_scanline(Scanline& sl)
204
+ {
205
+ for(;;)
206
+ {
207
+ if(m_scan_y > m_outline.max_y()) return false;
208
+ sl.reset_spans();
209
+ unsigned num_cells = m_outline.scanline_num_cells(m_scan_y);
210
+ const cell_aa* const* cells = m_outline.scanline_cells(m_scan_y);
211
+ int cover = 0;
212
+
213
+ while(num_cells)
214
+ {
215
+ const cell_aa* cur_cell = *cells;
216
+ int x = cur_cell->x;
217
+ int area = cur_cell->area;
218
+ unsigned alpha;
219
+
220
+ cover += cur_cell->cover;
221
+
222
+ //accumulate all cells with the same X
223
+ while(--num_cells)
224
+ {
225
+ cur_cell = *++cells;
226
+ if(cur_cell->x != x) break;
227
+ area += cur_cell->area;
228
+ cover += cur_cell->cover;
229
+ }
230
+
231
+ if(area)
232
+ {
233
+ alpha = calculate_alpha((cover << (poly_subpixel_shift + 1)) - area);
234
+ if(alpha)
235
+ {
236
+ sl.add_cell(x, alpha);
237
+ }
238
+ x++;
239
+ }
240
+
241
+ if(num_cells && cur_cell->x > x)
242
+ {
243
+ alpha = calculate_alpha(cover << (poly_subpixel_shift + 1));
244
+ if(alpha)
245
+ {
246
+ sl.add_span(x, cur_cell->x - x, alpha);
247
+ }
248
+ }
249
+ }
250
+
251
+ if(sl.num_spans()) break;
252
+ ++m_scan_y;
253
+ }
254
+
255
+ sl.finalize(m_scan_y);
256
+ ++m_scan_y;
257
+ return true;
258
+ }
259
+
260
+ //--------------------------------------------------------------------
261
+ bool hit_test(int tx, int ty);
262
+
263
+
264
+ private:
265
+ //--------------------------------------------------------------------
266
+ // Disable copying
267
+ rasterizer_scanline_aa(const rasterizer_scanline_aa<Clip>&);
268
+ const rasterizer_scanline_aa<Clip>&
269
+ operator = (const rasterizer_scanline_aa<Clip>&);
270
+
271
+ private:
272
+ rasterizer_cells_aa<cell_aa> m_outline;
273
+ clip_type m_clipper;
274
+ int m_gamma[aa_scale];
275
+ filling_rule_e m_filling_rule;
276
+ bool m_auto_close;
277
+ coord_type m_start_x;
278
+ coord_type m_start_y;
279
+ unsigned m_status;
280
+ int m_scan_y;
281
+ };
282
+
283
+
284
+
285
+
286
+
287
+
288
+
289
+
290
+
291
+
292
+
293
+
294
+ //------------------------------------------------------------------------
295
+ template<class Clip>
296
+ void rasterizer_scanline_aa<Clip>::reset()
297
+ {
298
+ m_outline.reset();
299
+ m_status = status_initial;
300
+ }
301
+
302
+ //------------------------------------------------------------------------
303
+ template<class Clip>
304
+ void rasterizer_scanline_aa<Clip>::filling_rule(filling_rule_e filling_rule)
305
+ {
306
+ m_filling_rule = filling_rule;
307
+ }
308
+
309
+ //------------------------------------------------------------------------
310
+ template<class Clip>
311
+ void rasterizer_scanline_aa<Clip>::clip_box(double x1, double y1,
312
+ double x2, double y2)
313
+ {
314
+ reset();
315
+ m_clipper.clip_box(conv_type::upscale(x1), conv_type::upscale(y1),
316
+ conv_type::upscale(x2), conv_type::upscale(y2));
317
+ }
318
+
319
+ //------------------------------------------------------------------------
320
+ template<class Clip>
321
+ void rasterizer_scanline_aa<Clip>::reset_clipping()
322
+ {
323
+ reset();
324
+ m_clipper.reset_clipping();
325
+ }
326
+
327
+ //------------------------------------------------------------------------
328
+ template<class Clip>
329
+ void rasterizer_scanline_aa<Clip>::close_polygon()
330
+ {
331
+ if(m_status == status_line_to)
332
+ {
333
+ m_clipper.line_to(m_outline, m_start_x, m_start_y);
334
+ m_status = status_closed;
335
+ }
336
+ }
337
+
338
+ //------------------------------------------------------------------------
339
+ template<class Clip>
340
+ void rasterizer_scanline_aa<Clip>::move_to(int x, int y)
341
+ {
342
+ if(m_outline.sorted()) reset();
343
+ if(m_auto_close) close_polygon();
344
+ m_clipper.move_to(m_start_x = conv_type::downscale(x),
345
+ m_start_y = conv_type::downscale(y));
346
+ m_status = status_move_to;
347
+ }
348
+
349
+ //------------------------------------------------------------------------
350
+ template<class Clip>
351
+ void rasterizer_scanline_aa<Clip>::line_to(int x, int y)
352
+ {
353
+ m_clipper.line_to(m_outline,
354
+ conv_type::downscale(x),
355
+ conv_type::downscale(y));
356
+ m_status = status_line_to;
357
+ }
358
+
359
+ //------------------------------------------------------------------------
360
+ template<class Clip>
361
+ void rasterizer_scanline_aa<Clip>::move_to_d(double x, double y)
362
+ {
363
+ if(m_outline.sorted()) reset();
364
+ if(m_auto_close) close_polygon();
365
+ m_clipper.move_to(m_start_x = conv_type::upscale(x),
366
+ m_start_y = conv_type::upscale(y));
367
+ m_status = status_move_to;
368
+ }
369
+
370
+ //------------------------------------------------------------------------
371
+ template<class Clip>
372
+ void rasterizer_scanline_aa<Clip>::line_to_d(double x, double y)
373
+ {
374
+ m_clipper.line_to(m_outline,
375
+ conv_type::upscale(x),
376
+ conv_type::upscale(y));
377
+ m_status = status_line_to;
378
+ }
379
+
380
+ //------------------------------------------------------------------------
381
+ template<class Clip>
382
+ void rasterizer_scanline_aa<Clip>::add_vertex(double x, double y, unsigned cmd)
383
+ {
384
+ if(is_move_to(cmd))
385
+ {
386
+ move_to_d(x, y);
387
+ }
388
+ else
389
+ if(is_vertex(cmd))
390
+ {
391
+ line_to_d(x, y);
392
+ }
393
+ else
394
+ if(is_close(cmd))
395
+ {
396
+ close_polygon();
397
+ }
398
+ }
399
+
400
+ //------------------------------------------------------------------------
401
+ template<class Clip>
402
+ void rasterizer_scanline_aa<Clip>::edge(int x1, int y1, int x2, int y2)
403
+ {
404
+ if(m_outline.sorted()) reset();
405
+ m_clipper.move_to(conv_type::downscale(x1), conv_type::downscale(y1));
406
+ m_clipper.line_to(m_outline,
407
+ conv_type::downscale(x2),
408
+ conv_type::downscale(y2));
409
+ m_status = status_move_to;
410
+ }
411
+
412
+ //------------------------------------------------------------------------
413
+ template<class Clip>
414
+ void rasterizer_scanline_aa<Clip>::edge_d(double x1, double y1,
415
+ double x2, double y2)
416
+ {
417
+ if(m_outline.sorted()) reset();
418
+ m_clipper.move_to(conv_type::upscale(x1), conv_type::upscale(y1));
419
+ m_clipper.line_to(m_outline,
420
+ conv_type::upscale(x2),
421
+ conv_type::upscale(y2));
422
+ m_status = status_move_to;
423
+ }
424
+
425
+ //------------------------------------------------------------------------
426
+ template<class Clip>
427
+ void rasterizer_scanline_aa<Clip>::sort()
428
+ {
429
+ if(m_auto_close) close_polygon();
430
+ m_outline.sort_cells();
431
+ }
432
+
433
+ //------------------------------------------------------------------------
434
+ template<class Clip>
435
+ AGG_INLINE bool rasterizer_scanline_aa<Clip>::rewind_scanlines()
436
+ {
437
+ if(m_auto_close) close_polygon();
438
+ m_outline.sort_cells();
439
+ if(m_outline.total_cells() == 0)
440
+ {
441
+ return false;
442
+ }
443
+ m_scan_y = m_outline.min_y();
444
+ return true;
445
+ }
446
+
447
+
448
+ //------------------------------------------------------------------------
449
+ template<class Clip>
450
+ AGG_INLINE bool rasterizer_scanline_aa<Clip>::navigate_scanline(int y)
451
+ {
452
+ if(m_auto_close) close_polygon();
453
+ m_outline.sort_cells();
454
+ if(m_outline.total_cells() == 0 ||
455
+ y < m_outline.min_y() ||
456
+ y > m_outline.max_y())
457
+ {
458
+ return false;
459
+ }
460
+ m_scan_y = y;
461
+ return true;
462
+ }
463
+
464
+ //------------------------------------------------------------------------
465
+ template<class Clip>
466
+ bool rasterizer_scanline_aa<Clip>::hit_test(int tx, int ty)
467
+ {
468
+ if(!navigate_scanline(ty)) return false;
469
+ scanline_hit_test sl(tx);
470
+ sweep_scanline(sl);
471
+ return sl.hit();
472
+ }
473
+
474
+
475
+
476
+ }
477
+
478
+
479
+
480
+ #endif
481
+
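Editorial note, not part of the diff: the numbered usage steps in the class comment of agg_rasterizer_scanline_aa.h above translate into roughly the following hedged sketch. hit_test() is used here only because it is the smallest self-contained way to trigger the sort-and-sweep phase without also pulling in a scanline container and a renderer:

#include "agg_rasterizer_scanline_aa.h"

bool point_in_triangle(int px, int py)
{
    agg::rasterizer_scanline_aa<> ras;          // 24.8 fixed-point coordinates internally
    ras.filling_rule(agg::fill_non_zero);       // step 1 (optional)
    ras.gamma(agg::gamma_none());               // step 2 (optional)
    ras.reset();                                // step 3
    ras.move_to_d(10.0, 10.0);                  // step 4: at least three vertices per contour
    ras.line_to_d(90.0, 15.0);
    ras.line_to_d(50.0, 80.0);
    return ras.hit_test(px, py);                // sorts the cells, sweeps the scanline at py
}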
data/bundled_deps/agg/agg/agg_rasterizer_scanline_aa_nogamma.h ADDED
@@ -0,0 +1,483 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ //
12
+ // The author gratefully acknowledges the support of David Turner,
13
+ // Robert Wilhelm, and Werner Lemberg - the authors of the FreeType
14
+ // library - in producing this work. See http://www.freetype.org for details.
15
+ //
16
+ //----------------------------------------------------------------------------
17
+ // Contact: [email protected]
18
19
+ // http://www.antigrain.com
20
+ //----------------------------------------------------------------------------
21
+ //
22
+ // Adaptation for 32-bit screen coordinates has been sponsored by
23
+ // Liberty Technology Systems, Inc., visit http://lib-sys.com
24
+ //
25
+ // Liberty Technology Systems, Inc. is the provider of
26
+ // PostScript and PDF technology for software developers.
27
+ //
28
+ //----------------------------------------------------------------------------
29
+ #ifndef AGG_RASTERIZER_SCANLINE_AA_NOGAMMA_INCLUDED
30
+ #define AGG_RASTERIZER_SCANLINE_AA_NOGAMMA_INCLUDED
31
+
32
+ #include <limits>
33
+ #include "agg_rasterizer_cells_aa.h"
34
+ #include "agg_rasterizer_sl_clip.h"
35
+
36
+
37
+ namespace agg
38
+ {
39
+
40
+
41
+ //-----------------------------------------------------------------cell_aa
42
+ // A pixel cell. There're no constructors defined and it was done
43
+ // intentionally in order to avoid extra overhead when allocating an
44
+ // array of cells.
45
+ struct cell_aa
46
+ {
47
+ int x;
48
+ int y;
49
+ int cover;
50
+ int area;
51
+
52
+ void initial()
53
+ {
54
+ x = std::numeric_limits<int>::max();
55
+ y = std::numeric_limits<int>::max();
56
+ cover = 0;
57
+ area = 0;
58
+ }
59
+
60
+ void style(const cell_aa&) {}
61
+
62
+ int not_equal(int ex, int ey, const cell_aa&) const
63
+ {
64
+ return ((unsigned)ex - (unsigned)x) | ((unsigned)ey - (unsigned)y);
65
+ }
66
+ };
67
+
68
+
69
+ //==================================================rasterizer_scanline_aa_nogamma
70
+ // Polygon rasterizer that is used to render filled polygons with
71
+ // high-quality Anti-Aliasing. Internally, by default, the class uses
72
+ // integer coordinates in format 24.8, i.e. 24 bits for integer part
73
+ // and 8 bits for fractional - see poly_subpixel_shift. This class can be
74
+ // used in the following way:
75
+ //
76
+ // 1. filling_rule(filling_rule_e ft) - optional.
77
+ //
78
+ // 2. gamma() - optional.
79
+ //
80
+ // 3. reset()
81
+ //
82
+ // 4. move_to(x, y) / line_to(x, y) - make the polygon. One can create
83
+ // more than one contour, but each contour must consist of at least 3
84
+ // vertices, i.e. move_to(x1, y1); line_to(x2, y2); line_to(x3, y3);
85
+ // is the absolute minimum of vertices that define a triangle.
86
+ // The algorithm does not check either the number of vertices or the
87
+ // coincidence of their coordinates, but in the worst case it just
88
+ // won't draw anything.
89
+ // The order of the vertices (clockwise or counterclockwise)
90
+ // is important when using the non-zero filling rule (fill_non_zero).
91
+ // In this case the vertex order of all the contours must be the same
92
+ // if you want your intersecting polygons to be without "holes".
93
+ // You actually can use different vertices order. If the contours do not
94
+ // intersect each other the order is not important anyway. If they do,
95
+ // contours with the same vertex order will be rendered without "holes"
96
+ // while the intersecting contours with different orders will have "holes".
97
+ //
98
+ // filling_rule() and gamma() can be called anytime before "sweeping".
99
+ //------------------------------------------------------------------------
100
+ template<class Clip=rasterizer_sl_clip_int> class rasterizer_scanline_aa_nogamma
101
+ {
102
+ enum status
103
+ {
104
+ status_initial,
105
+ status_move_to,
106
+ status_line_to,
107
+ status_closed
108
+ };
109
+
110
+ public:
111
+ typedef Clip clip_type;
112
+ typedef typename Clip::conv_type conv_type;
113
+ typedef typename Clip::coord_type coord_type;
114
+
115
+ enum aa_scale_e
116
+ {
117
+ aa_shift = 8,
118
+ aa_scale = 1 << aa_shift,
119
+ aa_mask = aa_scale - 1,
120
+ aa_scale2 = aa_scale * 2,
121
+ aa_mask2 = aa_scale2 - 1
122
+ };
123
+
124
+ //--------------------------------------------------------------------
125
+ rasterizer_scanline_aa_nogamma() :
126
+ m_outline(),
127
+ m_clipper(),
128
+ m_filling_rule(fill_non_zero),
129
+ m_auto_close(true),
130
+ m_start_x(0),
131
+ m_start_y(0),
132
+ m_status(status_initial)
133
+ {
134
+ }
135
+
136
+ //--------------------------------------------------------------------
137
+ void reset();
138
+ void reset_clipping();
139
+ void clip_box(double x1, double y1, double x2, double y2);
140
+ void filling_rule(filling_rule_e filling_rule);
141
+ void auto_close(bool flag) { m_auto_close = flag; }
142
+
143
+ //--------------------------------------------------------------------
144
+ unsigned apply_gamma(unsigned cover) const
145
+ {
146
+ return cover;
147
+ }
148
+
149
+ //--------------------------------------------------------------------
150
+ void move_to(int x, int y);
151
+ void line_to(int x, int y);
152
+ void move_to_d(double x, double y);
153
+ void line_to_d(double x, double y);
154
+ void close_polygon();
155
+ void add_vertex(double x, double y, unsigned cmd);
156
+
157
+ void edge(int x1, int y1, int x2, int y2);
158
+ void edge_d(double x1, double y1, double x2, double y2);
159
+
160
+ //-------------------------------------------------------------------
161
+ template<class VertexSource>
162
+ void add_path(VertexSource& vs, unsigned path_id=0)
163
+ {
164
+ double x;
165
+ double y;
166
+
167
+ unsigned cmd;
168
+ vs.rewind(path_id);
169
+ if(m_outline.sorted()) reset();
170
+ while(!is_stop(cmd = vs.vertex(&x, &y)))
171
+ {
172
+ add_vertex(x, y, cmd);
173
+ }
174
+ }
175
+
176
+ //--------------------------------------------------------------------
177
+ int min_x() const { return m_outline.min_x(); }
178
+ int min_y() const { return m_outline.min_y(); }
179
+ int max_x() const { return m_outline.max_x(); }
180
+ int max_y() const { return m_outline.max_y(); }
181
+
182
+ //--------------------------------------------------------------------
183
+ void sort();
184
+ bool rewind_scanlines();
185
+ bool navigate_scanline(int y);
186
+
187
+ //--------------------------------------------------------------------
188
+ AGG_INLINE unsigned calculate_alpha(int area) const
189
+ {
190
+ int cover = area >> (poly_subpixel_shift*2 + 1 - aa_shift);
191
+
192
+ if(cover < 0) cover = -cover;
193
+ if(m_filling_rule == fill_even_odd)
194
+ {
195
+ cover &= aa_mask2;
196
+ if(cover > aa_scale)
197
+ {
198
+ cover = aa_scale2 - cover;
199
+ }
200
+ }
201
+ if(cover > aa_mask) cover = aa_mask;
202
+ return cover;
203
+ }
204
+
205
+ //--------------------------------------------------------------------
206
+ template<class Scanline> bool sweep_scanline(Scanline& sl)
207
+ {
208
+ for(;;)
209
+ {
210
+ if(m_scan_y > m_outline.max_y()) return false;
211
+ sl.reset_spans();
212
+ unsigned num_cells = m_outline.scanline_num_cells(m_scan_y);
213
+ const cell_aa* const* cells = m_outline.scanline_cells(m_scan_y);
214
+ int cover = 0;
215
+
216
+ while(num_cells)
217
+ {
218
+ const cell_aa* cur_cell = *cells;
219
+ int x = cur_cell->x;
220
+ int area = cur_cell->area;
221
+ unsigned alpha;
222
+
223
+ cover += cur_cell->cover;
224
+
225
+ //accumulate all cells with the same X
226
+ while(--num_cells)
227
+ {
228
+ cur_cell = *++cells;
229
+ if(cur_cell->x != x) break;
230
+ area += cur_cell->area;
231
+ cover += cur_cell->cover;
232
+ }
233
+
234
+ if(area)
235
+ {
236
+ alpha = calculate_alpha((cover << (poly_subpixel_shift + 1)) - area);
237
+ if(alpha)
238
+ {
239
+ sl.add_cell(x, alpha);
240
+ }
241
+ x++;
242
+ }
243
+
244
+ if(num_cells && cur_cell->x > x)
245
+ {
246
+ alpha = calculate_alpha(cover << (poly_subpixel_shift + 1));
247
+ if(alpha)
248
+ {
249
+ sl.add_span(x, cur_cell->x - x, alpha);
250
+ }
251
+ }
252
+ }
253
+
254
+ if(sl.num_spans()) break;
255
+ ++m_scan_y;
256
+ }
257
+
258
+ sl.finalize(m_scan_y);
259
+ ++m_scan_y;
260
+ return true;
261
+ }
262
+
263
+ //--------------------------------------------------------------------
264
+ bool hit_test(int tx, int ty);
265
+
266
+
267
+ private:
268
+ //--------------------------------------------------------------------
269
+ // Disable copying
270
+ rasterizer_scanline_aa_nogamma(const rasterizer_scanline_aa_nogamma<Clip>&);
271
+ const rasterizer_scanline_aa_nogamma<Clip>&
272
+ operator = (const rasterizer_scanline_aa_nogamma<Clip>&);
273
+
274
+ private:
275
+ rasterizer_cells_aa<cell_aa> m_outline;
276
+ clip_type m_clipper;
277
+ filling_rule_e m_filling_rule;
278
+ bool m_auto_close;
279
+ coord_type m_start_x;
280
+ coord_type m_start_y;
281
+ unsigned m_status;
282
+ int m_scan_y;
283
+ };
284
+
285
+
286
+
287
+
288
+
289
+
290
+
291
+
292
+
293
+
294
+
295
+
296
+ //------------------------------------------------------------------------
297
+ template<class Clip>
298
+ void rasterizer_scanline_aa_nogamma<Clip>::reset()
299
+ {
300
+ m_outline.reset();
301
+ m_status = status_initial;
302
+ }
303
+
304
+ //------------------------------------------------------------------------
305
+ template<class Clip>
306
+ void rasterizer_scanline_aa_nogamma<Clip>::filling_rule(filling_rule_e filling_rule)
307
+ {
308
+ m_filling_rule = filling_rule;
309
+ }
310
+
311
+ //------------------------------------------------------------------------
312
+ template<class Clip>
313
+ void rasterizer_scanline_aa_nogamma<Clip>::clip_box(double x1, double y1,
314
+ double x2, double y2)
315
+ {
316
+ reset();
317
+ m_clipper.clip_box(conv_type::upscale(x1), conv_type::upscale(y1),
318
+ conv_type::upscale(x2), conv_type::upscale(y2));
319
+ }
320
+
321
+ //------------------------------------------------------------------------
322
+ template<class Clip>
323
+ void rasterizer_scanline_aa_nogamma<Clip>::reset_clipping()
324
+ {
325
+ reset();
326
+ m_clipper.reset_clipping();
327
+ }
328
+
329
+ //------------------------------------------------------------------------
330
+ template<class Clip>
331
+ void rasterizer_scanline_aa_nogamma<Clip>::close_polygon()
332
+ {
333
+ if(m_status == status_line_to)
334
+ {
335
+ m_clipper.line_to(m_outline, m_start_x, m_start_y);
336
+ m_status = status_closed;
337
+ }
338
+ }
339
+
340
+ //------------------------------------------------------------------------
341
+ template<class Clip>
342
+ void rasterizer_scanline_aa_nogamma<Clip>::move_to(int x, int y)
343
+ {
344
+ if(m_outline.sorted()) reset();
345
+ if(m_auto_close) close_polygon();
346
+ m_clipper.move_to(m_start_x = conv_type::downscale(x),
347
+ m_start_y = conv_type::downscale(y));
348
+ m_status = status_move_to;
349
+ }
350
+
351
+ //------------------------------------------------------------------------
352
+ template<class Clip>
353
+ void rasterizer_scanline_aa_nogamma<Clip>::line_to(int x, int y)
354
+ {
355
+ m_clipper.line_to(m_outline,
356
+ conv_type::downscale(x),
357
+ conv_type::downscale(y));
358
+ m_status = status_line_to;
359
+ }
360
+
361
+ //------------------------------------------------------------------------
362
+ template<class Clip>
363
+ void rasterizer_scanline_aa_nogamma<Clip>::move_to_d(double x, double y)
364
+ {
365
+ if(m_outline.sorted()) reset();
366
+ if(m_auto_close) close_polygon();
367
+ m_clipper.move_to(m_start_x = conv_type::upscale(x),
368
+ m_start_y = conv_type::upscale(y));
369
+ m_status = status_move_to;
370
+ }
371
+
372
+ //------------------------------------------------------------------------
373
+ template<class Clip>
374
+ void rasterizer_scanline_aa_nogamma<Clip>::line_to_d(double x, double y)
375
+ {
376
+ m_clipper.line_to(m_outline,
377
+ conv_type::upscale(x),
378
+ conv_type::upscale(y));
379
+ m_status = status_line_to;
380
+ }
381
+
382
+ //------------------------------------------------------------------------
383
+ template<class Clip>
384
+ void rasterizer_scanline_aa_nogamma<Clip>::add_vertex(double x, double y, unsigned cmd)
385
+ {
386
+ if(is_move_to(cmd))
387
+ {
388
+ move_to_d(x, y);
389
+ }
390
+ else
391
+ if(is_vertex(cmd))
392
+ {
393
+ line_to_d(x, y);
394
+ }
395
+ else
396
+ if(is_close(cmd))
397
+ {
398
+ close_polygon();
399
+ }
400
+ }
401
+
402
+ //------------------------------------------------------------------------
403
+ template<class Clip>
404
+ void rasterizer_scanline_aa_nogamma<Clip>::edge(int x1, int y1, int x2, int y2)
405
+ {
406
+ if(m_outline.sorted()) reset();
407
+ m_clipper.move_to(conv_type::downscale(x1), conv_type::downscale(y1));
408
+ m_clipper.line_to(m_outline,
409
+ conv_type::downscale(x2),
410
+ conv_type::downscale(y2));
411
+ m_status = status_move_to;
412
+ }
413
+
414
+ //------------------------------------------------------------------------
415
+ template<class Clip>
416
+ void rasterizer_scanline_aa_nogamma<Clip>::edge_d(double x1, double y1,
417
+ double x2, double y2)
418
+ {
419
+ if(m_outline.sorted()) reset();
420
+ m_clipper.move_to(conv_type::upscale(x1), conv_type::upscale(y1));
421
+ m_clipper.line_to(m_outline,
422
+ conv_type::upscale(x2),
423
+ conv_type::upscale(y2));
424
+ m_status = status_move_to;
425
+ }
426
+
427
+ //------------------------------------------------------------------------
428
+ template<class Clip>
429
+ void rasterizer_scanline_aa_nogamma<Clip>::sort()
430
+ {
431
+ if(m_auto_close) close_polygon();
432
+ m_outline.sort_cells();
433
+ }
434
+
435
+ //------------------------------------------------------------------------
436
+ template<class Clip>
437
+ AGG_INLINE bool rasterizer_scanline_aa_nogamma<Clip>::rewind_scanlines()
438
+ {
439
+ if(m_auto_close) close_polygon();
440
+ m_outline.sort_cells();
441
+ if(m_outline.total_cells() == 0)
442
+ {
443
+ return false;
444
+ }
445
+ m_scan_y = m_outline.min_y();
446
+ return true;
447
+ }
448
+
449
+
450
+ //------------------------------------------------------------------------
451
+ template<class Clip>
452
+ AGG_INLINE bool rasterizer_scanline_aa_nogamma<Clip>::navigate_scanline(int y)
453
+ {
454
+ if(m_auto_close) close_polygon();
455
+ m_outline.sort_cells();
456
+ if(m_outline.total_cells() == 0 ||
457
+ y < m_outline.min_y() ||
458
+ y > m_outline.max_y())
459
+ {
460
+ return false;
461
+ }
462
+ m_scan_y = y;
463
+ return true;
464
+ }
465
+
466
+ //------------------------------------------------------------------------
467
+ template<class Clip>
468
+ bool rasterizer_scanline_aa_nogamma<Clip>::hit_test(int tx, int ty)
469
+ {
470
+ if(!navigate_scanline(ty)) return false;
471
+ scanline_hit_test sl(tx);
472
+ sweep_scanline(sl);
473
+ return sl.hit();
474
+ }
475
+
476
+
477
+
478
+ }
479
+
480
+
481
+
482
+ #endif
483
+
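Editorial note, not part of the diff: the _nogamma variant is a drop-in replacement for rasterizer_scanline_aa<> when no gamma table is wanted; apply_gamma() and calculate_alpha() return the clipped cover unchanged, so there is no m_gamma[] table to fill. Hedged sketch:

#include "agg_rasterizer_scanline_aa_nogamma.h"

bool hit_nogamma(int px, int py)
{
    agg::rasterizer_scanline_aa_nogamma<> ras;  // same public interface, minus the gamma table
    ras.move_to_d(0.0, 0.0);
    ras.line_to_d(20.0, 0.0);
    ras.line_to_d(10.0, 15.0);
    return ras.hit_test(px, py);
}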
data/bundled_deps/agg/agg/agg_rasterizer_sl_clip.h ADDED
@@ -0,0 +1,351 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ #ifndef AGG_RASTERIZER_SL_CLIP_INCLUDED
16
+ #define AGG_RASTERIZER_SL_CLIP_INCLUDED
17
+
18
+ #include "agg_clip_liang_barsky.h"
19
+
20
+ namespace agg
21
+ {
22
+ //--------------------------------------------------------poly_max_coord_e
23
+ enum poly_max_coord_e
24
+ {
25
+ poly_max_coord = (1 << 30) - 1 //----poly_max_coord
26
+ };
27
+
28
+ //------------------------------------------------------------ras_conv_int
29
+ struct ras_conv_int
30
+ {
31
+ typedef int coord_type;
32
+ static AGG_INLINE int mul_div(double a, double b, double c)
33
+ {
34
+ return iround(a * b / c);
35
+ }
36
+ static int xi(int v) { return v; }
37
+ static int yi(int v) { return v; }
38
+ static int upscale(double v) { return iround(v * poly_subpixel_scale); }
39
+ static int downscale(int v) { return v; }
40
+ };
41
+
42
+ //--------------------------------------------------------ras_conv_int_sat
43
+ struct ras_conv_int_sat
44
+ {
45
+ typedef int coord_type;
46
+ static AGG_INLINE int mul_div(double a, double b, double c)
47
+ {
48
+ return saturation<poly_max_coord>::iround(a * b / c);
49
+ }
50
+ static int xi(int v) { return v; }
51
+ static int yi(int v) { return v; }
52
+ static int upscale(double v)
53
+ {
54
+ return saturation<poly_max_coord>::iround(v * poly_subpixel_scale);
55
+ }
56
+ static int downscale(int v) { return v; }
57
+ };
58
+
59
+ //---------------------------------------------------------ras_conv_int_3x
60
+ struct ras_conv_int_3x
61
+ {
62
+ typedef int coord_type;
63
+ static AGG_INLINE int mul_div(double a, double b, double c)
64
+ {
65
+ return iround(a * b / c);
66
+ }
67
+ static int xi(int v) { return v * 3; }
68
+ static int yi(int v) { return v; }
69
+ static int upscale(double v) { return iround(v * poly_subpixel_scale); }
70
+ static int downscale(int v) { return v; }
71
+ };
72
+
73
+ //-----------------------------------------------------------ras_conv_dbl
74
+ struct ras_conv_dbl
75
+ {
76
+ typedef double coord_type;
77
+ static AGG_INLINE double mul_div(double a, double b, double c)
78
+ {
79
+ return a * b / c;
80
+ }
81
+ static int xi(double v) { return iround(v * poly_subpixel_scale); }
82
+ static int yi(double v) { return iround(v * poly_subpixel_scale); }
83
+ static double upscale(double v) { return v; }
84
+ static double downscale(int v) { return v / double(poly_subpixel_scale); }
85
+ };
86
+
87
+ //--------------------------------------------------------ras_conv_dbl_3x
88
+ struct ras_conv_dbl_3x
89
+ {
90
+ typedef double coord_type;
91
+ static AGG_INLINE double mul_div(double a, double b, double c)
92
+ {
93
+ return a * b / c;
94
+ }
95
+ static int xi(double v) { return iround(v * poly_subpixel_scale * 3); }
96
+ static int yi(double v) { return iround(v * poly_subpixel_scale); }
97
+ static double upscale(double v) { return v; }
98
+ static double downscale(int v) { return v / double(poly_subpixel_scale); }
99
+ };
100
+
101
+
102
+
103
+
104
+
105
+ //------------------------------------------------------rasterizer_sl_clip
106
+ template<class Conv> class rasterizer_sl_clip
107
+ {
108
+ public:
109
+ typedef Conv conv_type;
110
+ typedef typename Conv::coord_type coord_type;
111
+ typedef rect_base<coord_type> rect_type;
112
+
113
+ //--------------------------------------------------------------------
114
+ rasterizer_sl_clip() :
115
+ m_clip_box(0,0,0,0),
116
+ m_x1(0),
117
+ m_y1(0),
118
+ m_f1(0),
119
+ m_clipping(false)
120
+ {}
121
+
122
+ //--------------------------------------------------------------------
123
+ void reset_clipping()
124
+ {
125
+ m_clipping = false;
126
+ }
127
+
128
+ //--------------------------------------------------------------------
129
+ void clip_box(coord_type x1, coord_type y1, coord_type x2, coord_type y2)
130
+ {
131
+ m_clip_box = rect_type(x1, y1, x2, y2);
132
+ m_clip_box.normalize();
133
+ m_clipping = true;
134
+ }
135
+
136
+ //--------------------------------------------------------------------
137
+ void move_to(coord_type x1, coord_type y1)
138
+ {
139
+ m_x1 = x1;
140
+ m_y1 = y1;
141
+ if(m_clipping) m_f1 = clipping_flags(x1, y1, m_clip_box);
142
+ }
143
+
144
+ private:
145
+ //------------------------------------------------------------------------
146
+ template<class Rasterizer>
147
+ AGG_INLINE void line_clip_y(Rasterizer& ras,
148
+ coord_type x1, coord_type y1,
149
+ coord_type x2, coord_type y2,
150
+ unsigned f1, unsigned f2) const
151
+ {
152
+ f1 &= 10;
153
+ f2 &= 10;
154
+ if((f1 | f2) == 0)
155
+ {
156
+ // Fully visible
157
+ ras.line(Conv::xi(x1), Conv::yi(y1), Conv::xi(x2), Conv::yi(y2));
158
+ }
159
+ else
160
+ {
161
+ if(f1 == f2)
162
+ {
163
+ // Invisible by Y
164
+ return;
165
+ }
166
+
167
+ coord_type tx1 = x1;
168
+ coord_type ty1 = y1;
169
+ coord_type tx2 = x2;
170
+ coord_type ty2 = y2;
171
+
172
+ if(f1 & 8) // y1 < clip.y1
173
+ {
174
+ tx1 = x1 + Conv::mul_div(m_clip_box.y1-y1, x2-x1, y2-y1);
175
+ ty1 = m_clip_box.y1;
176
+ }
177
+
178
+ if(f1 & 2) // y1 > clip.y2
179
+ {
180
+ tx1 = x1 + Conv::mul_div(m_clip_box.y2-y1, x2-x1, y2-y1);
181
+ ty1 = m_clip_box.y2;
182
+ }
183
+
184
+ if(f2 & 8) // y2 < clip.y1
185
+ {
186
+ tx2 = x1 + Conv::mul_div(m_clip_box.y1-y1, x2-x1, y2-y1);
187
+ ty2 = m_clip_box.y1;
188
+ }
189
+
190
+ if(f2 & 2) // y2 > clip.y2
191
+ {
192
+ tx2 = x1 + Conv::mul_div(m_clip_box.y2-y1, x2-x1, y2-y1);
193
+ ty2 = m_clip_box.y2;
194
+ }
195
+ ras.line(Conv::xi(tx1), Conv::yi(ty1),
196
+ Conv::xi(tx2), Conv::yi(ty2));
197
+ }
198
+ }
199
+
200
+
201
+ public:
202
+ //--------------------------------------------------------------------
203
+ template<class Rasterizer>
204
+ void line_to(Rasterizer& ras, coord_type x2, coord_type y2)
205
+ {
206
+ if(m_clipping)
207
+ {
208
+ unsigned f2 = clipping_flags(x2, y2, m_clip_box);
209
+
210
+ if((m_f1 & 10) == (f2 & 10) && (m_f1 & 10) != 0)
211
+ {
212
+ // Invisible by Y
213
+ m_x1 = x2;
214
+ m_y1 = y2;
215
+ m_f1 = f2;
216
+ return;
217
+ }
218
+
219
+ coord_type x1 = m_x1;
220
+ coord_type y1 = m_y1;
221
+ unsigned f1 = m_f1;
222
+ coord_type y3, y4;
223
+ unsigned f3, f4;
224
+
225
+ switch(((f1 & 5) << 1) | (f2 & 5))
226
+ {
227
+ case 0: // Visible by X
228
+ line_clip_y(ras, x1, y1, x2, y2, f1, f2);
229
+ break;
230
+
231
+ case 1: // x2 > clip.x2
232
+ y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
233
+ f3 = clipping_flags_y(y3, m_clip_box);
234
+ line_clip_y(ras, x1, y1, m_clip_box.x2, y3, f1, f3);
235
+ line_clip_y(ras, m_clip_box.x2, y3, m_clip_box.x2, y2, f3, f2);
236
+ break;
237
+
238
+ case 2: // x1 > clip.x2
239
+ y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
240
+ f3 = clipping_flags_y(y3, m_clip_box);
241
+ line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y3, f1, f3);
242
+ line_clip_y(ras, m_clip_box.x2, y3, x2, y2, f3, f2);
243
+ break;
244
+
245
+ case 3: // x1 > clip.x2 && x2 > clip.x2
246
+ line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y2, f1, f2);
247
+ break;
248
+
249
+ case 4: // x2 < clip.x1
250
+ y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
251
+ f3 = clipping_flags_y(y3, m_clip_box);
252
+ line_clip_y(ras, x1, y1, m_clip_box.x1, y3, f1, f3);
253
+ line_clip_y(ras, m_clip_box.x1, y3, m_clip_box.x1, y2, f3, f2);
254
+ break;
255
+
256
+ case 6: // x1 > clip.x2 && x2 < clip.x1
257
+ y3 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
258
+ y4 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
259
+ f3 = clipping_flags_y(y3, m_clip_box);
260
+ f4 = clipping_flags_y(y4, m_clip_box);
261
+ line_clip_y(ras, m_clip_box.x2, y1, m_clip_box.x2, y3, f1, f3);
262
+ line_clip_y(ras, m_clip_box.x2, y3, m_clip_box.x1, y4, f3, f4);
263
+ line_clip_y(ras, m_clip_box.x1, y4, m_clip_box.x1, y2, f4, f2);
264
+ break;
265
+
266
+ case 8: // x1 < clip.x1
267
+ y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
268
+ f3 = clipping_flags_y(y3, m_clip_box);
269
+ line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y3, f1, f3);
270
+ line_clip_y(ras, m_clip_box.x1, y3, x2, y2, f3, f2);
271
+ break;
272
+
273
+ case 9: // x1 < clip.x1 && x2 > clip.x2
274
+ y3 = y1 + Conv::mul_div(m_clip_box.x1-x1, y2-y1, x2-x1);
275
+ y4 = y1 + Conv::mul_div(m_clip_box.x2-x1, y2-y1, x2-x1);
276
+ f3 = clipping_flags_y(y3, m_clip_box);
277
+ f4 = clipping_flags_y(y4, m_clip_box);
278
+ line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y3, f1, f3);
279
+ line_clip_y(ras, m_clip_box.x1, y3, m_clip_box.x2, y4, f3, f4);
280
+ line_clip_y(ras, m_clip_box.x2, y4, m_clip_box.x2, y2, f4, f2);
281
+ break;
282
+
283
+ case 12: // x1 < clip.x1 && x2 < clip.x1
284
+ line_clip_y(ras, m_clip_box.x1, y1, m_clip_box.x1, y2, f1, f2);
285
+ break;
286
+ }
287
+ m_f1 = f2;
288
+ }
289
+ else
290
+ {
291
+ ras.line(Conv::xi(m_x1), Conv::yi(m_y1),
292
+ Conv::xi(x2), Conv::yi(y2));
293
+ }
294
+ m_x1 = x2;
295
+ m_y1 = y2;
296
+ }
297
+
298
+
299
+ private:
300
+ rect_type m_clip_box;
301
+ coord_type m_x1;
302
+ coord_type m_y1;
303
+ unsigned m_f1;
304
+ bool m_clipping;
305
+ };
306
+
307
+
308
+
309
+
310
+ //---------------------------------------------------rasterizer_sl_no_clip
311
+ class rasterizer_sl_no_clip
312
+ {
313
+ public:
314
+ typedef ras_conv_int conv_type;
315
+ typedef int coord_type;
316
+
317
+ rasterizer_sl_no_clip() : m_x1(0), m_y1(0) {}
318
+
319
+ void reset_clipping() {}
320
+ void clip_box(coord_type, coord_type, coord_type, coord_type) {}
321
+ void move_to(coord_type x1, coord_type y1) { m_x1 = x1; m_y1 = y1; }
322
+
323
+ template<class Rasterizer>
324
+ void line_to(Rasterizer& ras, coord_type x2, coord_type y2)
325
+ {
326
+ ras.line(m_x1, m_y1, x2, y2);
327
+ m_x1 = x2;
328
+ m_y1 = y2;
329
+ }
330
+
331
+ private:
332
+ int m_x1, m_y1;
333
+ };
334
+
335
+
336
+ // -----rasterizer_sl_clip_int
337
+ // -----rasterizer_sl_clip_int_sat
338
+ // -----rasterizer_sl_clip_int_3x
339
+ // -----rasterizer_sl_clip_dbl
340
+ // -----rasterizer_sl_clip_dbl_3x
341
+ //------------------------------------------------------------------------
342
+ typedef rasterizer_sl_clip<ras_conv_int> rasterizer_sl_clip_int;
343
+ typedef rasterizer_sl_clip<ras_conv_int_sat> rasterizer_sl_clip_int_sat;
344
+ typedef rasterizer_sl_clip<ras_conv_int_3x> rasterizer_sl_clip_int_3x;
345
+ typedef rasterizer_sl_clip<ras_conv_dbl> rasterizer_sl_clip_dbl;
346
+ typedef rasterizer_sl_clip<ras_conv_dbl_3x> rasterizer_sl_clip_dbl_3x;
347
+
348
+
349
+ }
350
+
351
+ #endif
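Usage sketch (illustrative, not from the diff): the clip adapters defined above are selected through the rasterizer's Clip template parameter; the integer adapter is the usual default, while the double variant keeps the clipping stage in floating point. File and class names are from the stock AGG 2.4 API bundled in this commit.

    // Illustrative sketch: a rasterizer whose clipping works on double coordinates.
    #include "agg_rasterizer_scanline_aa.h"
    #include "agg_rasterizer_sl_clip.h"

    void build_clipped_path()
    {
        agg::rasterizer_scanline_aa<agg::rasterizer_sl_clip_dbl> ras;
        ras.clip_box(0.0, 0.0, 640.0, 480.0);   // forwarded to rasterizer_sl_clip
        ras.move_to_d(-50.0, -50.0);            // segments are clipped as they are added
        ras.line_to_d(700.0, 100.0);
        ras.line_to_d(300.0, 520.0);
        ras.close_polygon();
    }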
data/bundled_deps/agg/agg/agg_renderer_base.h ADDED
@@ -0,0 +1,731 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ //
16
+ // class renderer_base
17
+ //
18
+ //----------------------------------------------------------------------------
19
+
20
+ #ifndef AGG_RENDERER_BASE_INCLUDED
21
+ #define AGG_RENDERER_BASE_INCLUDED
22
+
23
+ #include "agg_basics.h"
24
+ #include "agg_rendering_buffer.h"
25
+
26
+ namespace agg
27
+ {
28
+
29
+ //-----------------------------------------------------------renderer_base
30
+ template<class PixelFormat> class renderer_base
31
+ {
32
+ public:
33
+ typedef PixelFormat pixfmt_type;
34
+ typedef typename pixfmt_type::color_type color_type;
35
+ typedef typename pixfmt_type::row_data row_data;
36
+
37
+ //--------------------------------------------------------------------
38
+ renderer_base() : m_ren(0), m_clip_box(1, 1, 0, 0) {}
39
+ explicit renderer_base(pixfmt_type& ren) :
40
+ m_ren(&ren),
41
+ m_clip_box(0, 0, ren.width() - 1, ren.height() - 1)
42
+ {}
43
+ void attach(pixfmt_type& ren)
44
+ {
45
+ m_ren = &ren;
46
+ m_clip_box = rect_i(0, 0, ren.width() - 1, ren.height() - 1);
47
+ }
48
+
49
+ //--------------------------------------------------------------------
50
+ const pixfmt_type& ren() const { return *m_ren; }
51
+ pixfmt_type& ren() { return *m_ren; }
52
+
53
+ //--------------------------------------------------------------------
54
+ unsigned width() const { return m_ren->width(); }
55
+ unsigned height() const { return m_ren->height(); }
56
+
57
+ //--------------------------------------------------------------------
58
+ bool clip_box(int x1, int y1, int x2, int y2)
59
+ {
60
+ rect_i cb(x1, y1, x2, y2);
61
+ cb.normalize();
62
+ if(cb.clip(rect_i(0, 0, width() - 1, height() - 1)))
63
+ {
64
+ m_clip_box = cb;
65
+ return true;
66
+ }
67
+ m_clip_box.x1 = 1;
68
+ m_clip_box.y1 = 1;
69
+ m_clip_box.x2 = 0;
70
+ m_clip_box.y2 = 0;
71
+ return false;
72
+ }
73
+
74
+ //--------------------------------------------------------------------
75
+ void reset_clipping(bool visibility)
76
+ {
77
+ if(visibility)
78
+ {
79
+ m_clip_box.x1 = 0;
80
+ m_clip_box.y1 = 0;
81
+ m_clip_box.x2 = width() - 1;
82
+ m_clip_box.y2 = height() - 1;
83
+ }
84
+ else
85
+ {
86
+ m_clip_box.x1 = 1;
87
+ m_clip_box.y1 = 1;
88
+ m_clip_box.x2 = 0;
89
+ m_clip_box.y2 = 0;
90
+ }
91
+ }
92
+
93
+ //--------------------------------------------------------------------
94
+ void clip_box_naked(int x1, int y1, int x2, int y2)
95
+ {
96
+ m_clip_box.x1 = x1;
97
+ m_clip_box.y1 = y1;
98
+ m_clip_box.x2 = x2;
99
+ m_clip_box.y2 = y2;
100
+ }
101
+
102
+ //--------------------------------------------------------------------
103
+ bool inbox(int x, int y) const
104
+ {
105
+ return x >= m_clip_box.x1 && y >= m_clip_box.y1 &&
106
+ x <= m_clip_box.x2 && y <= m_clip_box.y2;
107
+ }
108
+
109
+ //--------------------------------------------------------------------
110
+ const rect_i& clip_box() const { return m_clip_box; }
111
+ int xmin() const { return m_clip_box.x1; }
112
+ int ymin() const { return m_clip_box.y1; }
113
+ int xmax() const { return m_clip_box.x2; }
114
+ int ymax() const { return m_clip_box.y2; }
115
+
116
+ //--------------------------------------------------------------------
117
+ const rect_i& bounding_clip_box() const { return m_clip_box; }
118
+ int bounding_xmin() const { return m_clip_box.x1; }
119
+ int bounding_ymin() const { return m_clip_box.y1; }
120
+ int bounding_xmax() const { return m_clip_box.x2; }
121
+ int bounding_ymax() const { return m_clip_box.y2; }
122
+
123
+ //--------------------------------------------------------------------
124
+ void clear(const color_type& c)
125
+ {
126
+ unsigned y;
127
+ if(width())
128
+ {
129
+ for(y = 0; y < height(); y++)
130
+ {
131
+ m_ren->copy_hline(0, y, width(), c);
132
+ }
133
+ }
134
+ }
135
+
136
+
137
+ //--------------------------------------------------------------------
138
+ void fill(const color_type& c)
139
+ {
140
+ unsigned y;
141
+ if(width())
142
+ {
143
+ for(y = 0; y < height(); y++)
144
+ {
145
+ m_ren->blend_hline(0, y, width(), c, cover_mask);
146
+ }
147
+ }
148
+ }
149
+
150
+ //--------------------------------------------------------------------
151
+ void copy_pixel(int x, int y, const color_type& c)
152
+ {
153
+ if(inbox(x, y))
154
+ {
155
+ m_ren->copy_pixel(x, y, c);
156
+ }
157
+ }
158
+
159
+ //--------------------------------------------------------------------
160
+ void blend_pixel(int x, int y, const color_type& c, cover_type cover)
161
+ {
162
+ if(inbox(x, y))
163
+ {
164
+ m_ren->blend_pixel(x, y, c, cover);
165
+ }
166
+ }
167
+
168
+ //--------------------------------------------------------------------
169
+ color_type pixel(int x, int y) const
170
+ {
171
+ return inbox(x, y) ?
172
+ m_ren->pixel(x, y) :
173
+ color_type::no_color();
174
+ }
175
+
176
+ //--------------------------------------------------------------------
177
+ void copy_hline(int x1, int y, int x2, const color_type& c)
178
+ {
179
+ if(x1 > x2) { int t = x2; x2 = x1; x1 = t; }
180
+ if(y > ymax()) return;
181
+ if(y < ymin()) return;
182
+ if(x1 > xmax()) return;
183
+ if(x2 < xmin()) return;
184
+
185
+ if(x1 < xmin()) x1 = xmin();
186
+ if(x2 > xmax()) x2 = xmax();
187
+
188
+ m_ren->copy_hline(x1, y, x2 - x1 + 1, c);
189
+ }
190
+
191
+ //--------------------------------------------------------------------
192
+ void copy_vline(int x, int y1, int y2, const color_type& c)
193
+ {
194
+ if(y1 > y2) { int t = y2; y2 = y1; y1 = t; }
195
+ if(x > xmax()) return;
196
+ if(x < xmin()) return;
197
+ if(y1 > ymax()) return;
198
+ if(y2 < ymin()) return;
199
+
200
+ if(y1 < ymin()) y1 = ymin();
201
+ if(y2 > ymax()) y2 = ymax();
202
+
203
+ m_ren->copy_vline(x, y1, y2 - y1 + 1, c);
204
+ }
205
+
206
+ //--------------------------------------------------------------------
207
+ void blend_hline(int x1, int y, int x2,
208
+ const color_type& c, cover_type cover)
209
+ {
210
+ if(x1 > x2) { int t = x2; x2 = x1; x1 = t; }
211
+ if(y > ymax()) return;
212
+ if(y < ymin()) return;
213
+ if(x1 > xmax()) return;
214
+ if(x2 < xmin()) return;
215
+
216
+ if(x1 < xmin()) x1 = xmin();
217
+ if(x2 > xmax()) x2 = xmax();
218
+
219
+ m_ren->blend_hline(x1, y, x2 - x1 + 1, c, cover);
220
+ }
221
+
222
+ //--------------------------------------------------------------------
223
+ void blend_vline(int x, int y1, int y2,
224
+ const color_type& c, cover_type cover)
225
+ {
226
+ if(y1 > y2) { int t = y2; y2 = y1; y1 = t; }
227
+ if(x > xmax()) return;
228
+ if(x < xmin()) return;
229
+ if(y1 > ymax()) return;
230
+ if(y2 < ymin()) return;
231
+
232
+ if(y1 < ymin()) y1 = ymin();
233
+ if(y2 > ymax()) y2 = ymax();
234
+
235
+ m_ren->blend_vline(x, y1, y2 - y1 + 1, c, cover);
236
+ }
237
+
238
+
239
+ //--------------------------------------------------------------------
240
+ void copy_bar(int x1, int y1, int x2, int y2, const color_type& c)
241
+ {
242
+ rect_i rc(x1, y1, x2, y2);
243
+ rc.normalize();
244
+ if(rc.clip(clip_box()))
245
+ {
246
+ int y;
247
+ for(y = rc.y1; y <= rc.y2; y++)
248
+ {
249
+ m_ren->copy_hline(rc.x1, y, unsigned(rc.x2 - rc.x1 + 1), c);
250
+ }
251
+ }
252
+ }
253
+
254
+ //--------------------------------------------------------------------
255
+ void blend_bar(int x1, int y1, int x2, int y2,
256
+ const color_type& c, cover_type cover)
257
+ {
258
+ rect_i rc(x1, y1, x2, y2);
259
+ rc.normalize();
260
+ if(rc.clip(clip_box()))
261
+ {
262
+ int y;
263
+ for(y = rc.y1; y <= rc.y2; y++)
264
+ {
265
+ m_ren->blend_hline(rc.x1,
266
+ y,
267
+ unsigned(rc.x2 - rc.x1 + 1),
268
+ c,
269
+ cover);
270
+ }
271
+ }
272
+ }
273
+
274
+ //--------------------------------------------------------------------
275
+ void blend_solid_hspan(int x, int y, int len,
276
+ const color_type& c,
277
+ const cover_type* covers)
278
+ {
279
+ if(y > ymax()) return;
280
+ if(y < ymin()) return;
281
+
282
+ if(x < xmin())
283
+ {
284
+ len -= xmin() - x;
285
+ if(len <= 0) return;
286
+ covers += xmin() - x;
287
+ x = xmin();
288
+ }
289
+ if(x + len > xmax())
290
+ {
291
+ len = xmax() - x + 1;
292
+ if(len <= 0) return;
293
+ }
294
+ m_ren->blend_solid_hspan(x, y, len, c, covers);
295
+ }
296
+
297
+ //--------------------------------------------------------------------
298
+ void blend_solid_vspan(int x, int y, int len,
299
+ const color_type& c,
300
+ const cover_type* covers)
301
+ {
302
+ if(x > xmax()) return;
303
+ if(x < xmin()) return;
304
+
305
+ if(y < ymin())
306
+ {
307
+ len -= ymin() - y;
308
+ if(len <= 0) return;
309
+ covers += ymin() - y;
310
+ y = ymin();
311
+ }
312
+ if(y + len > ymax())
313
+ {
314
+ len = ymax() - y + 1;
315
+ if(len <= 0) return;
316
+ }
317
+ m_ren->blend_solid_vspan(x, y, len, c, covers);
318
+ }
319
+
320
+
321
+ //--------------------------------------------------------------------
322
+ void copy_color_hspan(int x, int y, int len, const color_type* colors)
323
+ {
324
+ if(y > ymax()) return;
325
+ if(y < ymin()) return;
326
+
327
+ if(x < xmin())
328
+ {
329
+ int d = xmin() - x;
330
+ len -= d;
331
+ if(len <= 0) return;
332
+ colors += d;
333
+ x = xmin();
334
+ }
335
+ if(x + len > xmax())
336
+ {
337
+ len = xmax() - x + 1;
338
+ if(len <= 0) return;
339
+ }
340
+ m_ren->copy_color_hspan(x, y, len, colors);
341
+ }
342
+
343
+
344
+ //--------------------------------------------------------------------
345
+ void copy_color_vspan(int x, int y, int len, const color_type* colors)
346
+ {
347
+ if(x > xmax()) return;
348
+ if(x < xmin()) return;
349
+
350
+ if(y < ymin())
351
+ {
352
+ int d = ymin() - y;
353
+ len -= d;
354
+ if(len <= 0) return;
355
+ colors += d;
356
+ y = ymin();
357
+ }
358
+ if(y + len > ymax())
359
+ {
360
+ len = ymax() - y + 1;
361
+ if(len <= 0) return;
362
+ }
363
+ m_ren->copy_color_vspan(x, y, len, colors);
364
+ }
365
+
366
+
367
+ //--------------------------------------------------------------------
368
+ void blend_color_hspan(int x, int y, int len,
369
+ const color_type* colors,
370
+ const cover_type* covers,
371
+ cover_type cover = agg::cover_full)
372
+ {
373
+ if(y > ymax()) return;
374
+ if(y < ymin()) return;
375
+
376
+ if(x < xmin())
377
+ {
378
+ int d = xmin() - x;
379
+ len -= d;
380
+ if(len <= 0) return;
381
+ if(covers) covers += d;
382
+ colors += d;
383
+ x = xmin();
384
+ }
385
+ if(x + len > xmax())
386
+ {
387
+ len = xmax() - x + 1;
388
+ if(len <= 0) return;
389
+ }
390
+ m_ren->blend_color_hspan(x, y, len, colors, covers, cover);
391
+ }
392
+
393
+ //--------------------------------------------------------------------
394
+ void blend_color_vspan(int x, int y, int len,
395
+ const color_type* colors,
396
+ const cover_type* covers,
397
+ cover_type cover = agg::cover_full)
398
+ {
399
+ if(x > xmax()) return;
400
+ if(x < xmin()) return;
401
+
402
+ if(y < ymin())
403
+ {
404
+ int d = ymin() - y;
405
+ len -= d;
406
+ if(len <= 0) return;
407
+ if(covers) covers += d;
408
+ colors += d;
409
+ y = ymin();
410
+ }
411
+ if(y + len > ymax())
412
+ {
413
+ len = ymax() - y + 1;
414
+ if(len <= 0) return;
415
+ }
416
+ m_ren->blend_color_vspan(x, y, len, colors, covers, cover);
417
+ }
418
+
419
+ //--------------------------------------------------------------------
420
+ rect_i clip_rect_area(rect_i& dst, rect_i& src, int wsrc, int hsrc) const
421
+ {
422
+ rect_i rc(0,0,0,0);
423
+ rect_i cb = clip_box();
424
+ ++cb.x2;
425
+ ++cb.y2;
426
+
427
+ if(src.x1 < 0)
428
+ {
429
+ dst.x1 -= src.x1;
430
+ src.x1 = 0;
431
+ }
432
+ if(src.y1 < 0)
433
+ {
434
+ dst.y1 -= src.y1;
435
+ src.y1 = 0;
436
+ }
437
+
438
+ if(src.x2 > wsrc) src.x2 = wsrc;
439
+ if(src.y2 > hsrc) src.y2 = hsrc;
440
+
441
+ if(dst.x1 < cb.x1)
442
+ {
443
+ src.x1 += cb.x1 - dst.x1;
444
+ dst.x1 = cb.x1;
445
+ }
446
+ if(dst.y1 < cb.y1)
447
+ {
448
+ src.y1 += cb.y1 - dst.y1;
449
+ dst.y1 = cb.y1;
450
+ }
451
+
452
+ if(dst.x2 > cb.x2) dst.x2 = cb.x2;
453
+ if(dst.y2 > cb.y2) dst.y2 = cb.y2;
454
+
455
+ rc.x2 = dst.x2 - dst.x1;
456
+ rc.y2 = dst.y2 - dst.y1;
457
+
458
+ if(rc.x2 > src.x2 - src.x1) rc.x2 = src.x2 - src.x1;
459
+ if(rc.y2 > src.y2 - src.y1) rc.y2 = src.y2 - src.y1;
460
+ return rc;
461
+ }
462
+
463
+ //--------------------------------------------------------------------
464
+ template<class RenBuf>
465
+ void copy_from(const RenBuf& src,
466
+ const rect_i* rect_src_ptr = 0,
467
+ int dx = 0,
468
+ int dy = 0)
469
+ {
470
+ rect_i rsrc(0, 0, src.width(), src.height());
471
+ if(rect_src_ptr)
472
+ {
473
+ rsrc.x1 = rect_src_ptr->x1;
474
+ rsrc.y1 = rect_src_ptr->y1;
475
+ rsrc.x2 = rect_src_ptr->x2 + 1;
476
+ rsrc.y2 = rect_src_ptr->y2 + 1;
477
+ }
478
+
479
+ // Version with xdst, ydst (absolute positioning)
480
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
481
+
482
+ // Version with dx, dy (relative positioning)
483
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
484
+
485
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
486
+
487
+ if(rc.x2 > 0)
488
+ {
489
+ int incy = 1;
490
+ if(rdst.y1 > rsrc.y1)
491
+ {
492
+ rsrc.y1 += rc.y2 - 1;
493
+ rdst.y1 += rc.y2 - 1;
494
+ incy = -1;
495
+ }
496
+ while(rc.y2 > 0)
497
+ {
498
+ m_ren->copy_from(src,
499
+ rdst.x1, rdst.y1,
500
+ rsrc.x1, rsrc.y1,
501
+ rc.x2);
502
+ rdst.y1 += incy;
503
+ rsrc.y1 += incy;
504
+ --rc.y2;
505
+ }
506
+ }
507
+ }
508
+
509
+ //--------------------------------------------------------------------
510
+ template<class SrcPixelFormatRenderer>
511
+ void blend_from(const SrcPixelFormatRenderer& src,
512
+ const rect_i* rect_src_ptr = 0,
513
+ int dx = 0,
514
+ int dy = 0,
515
+ cover_type cover = agg::cover_full)
516
+ {
517
+ rect_i rsrc(0, 0, src.width(), src.height());
518
+ if(rect_src_ptr)
519
+ {
520
+ rsrc.x1 = rect_src_ptr->x1;
521
+ rsrc.y1 = rect_src_ptr->y1;
522
+ rsrc.x2 = rect_src_ptr->x2 + 1;
523
+ rsrc.y2 = rect_src_ptr->y2 + 1;
524
+ }
525
+
526
+ // Version with xdst, ydst (absolute positioning)
527
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
528
+
529
+ // Version with dx, dy (relative positioning)
530
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
531
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
532
+
533
+ if(rc.x2 > 0)
534
+ {
535
+ int incy = 1;
536
+ if(rdst.y1 > rsrc.y1)
537
+ {
538
+ rsrc.y1 += rc.y2 - 1;
539
+ rdst.y1 += rc.y2 - 1;
540
+ incy = -1;
541
+ }
542
+ while(rc.y2 > 0)
543
+ {
544
+ typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
545
+ if(rw.ptr)
546
+ {
547
+ int x1src = rsrc.x1;
548
+ int x1dst = rdst.x1;
549
+ int len = rc.x2;
550
+ if(rw.x1 > x1src)
551
+ {
552
+ x1dst += rw.x1 - x1src;
553
+ len -= rw.x1 - x1src;
554
+ x1src = rw.x1;
555
+ }
556
+ if(len > 0)
557
+ {
558
+ if(x1src + len-1 > rw.x2)
559
+ {
560
+ len -= x1src + len - rw.x2 - 1;
561
+ }
562
+ if(len > 0)
563
+ {
564
+ m_ren->blend_from(src,
565
+ x1dst, rdst.y1,
566
+ x1src, rsrc.y1,
567
+ len,
568
+ cover);
569
+ }
570
+ }
571
+ }
572
+ rdst.y1 += incy;
573
+ rsrc.y1 += incy;
574
+ --rc.y2;
575
+ }
576
+ }
577
+ }
578
+
579
+ //--------------------------------------------------------------------
580
+ template<class SrcPixelFormatRenderer>
581
+ void blend_from_color(const SrcPixelFormatRenderer& src,
582
+ const color_type& color,
583
+ const rect_i* rect_src_ptr = 0,
584
+ int dx = 0,
585
+ int dy = 0,
586
+ cover_type cover = agg::cover_full)
587
+ {
588
+ rect_i rsrc(0, 0, src.width(), src.height());
589
+ if(rect_src_ptr)
590
+ {
591
+ rsrc.x1 = rect_src_ptr->x1;
592
+ rsrc.y1 = rect_src_ptr->y1;
593
+ rsrc.x2 = rect_src_ptr->x2 + 1;
594
+ rsrc.y2 = rect_src_ptr->y2 + 1;
595
+ }
596
+
597
+ // Version with xdst, ydst (absolute positioning)
598
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
599
+
600
+ // Version with dx, dy (relative positioning)
601
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
602
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
603
+
604
+ if(rc.x2 > 0)
605
+ {
606
+ int incy = 1;
607
+ if(rdst.y1 > rsrc.y1)
608
+ {
609
+ rsrc.y1 += rc.y2 - 1;
610
+ rdst.y1 += rc.y2 - 1;
611
+ incy = -1;
612
+ }
613
+ while(rc.y2 > 0)
614
+ {
615
+ typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
616
+ if(rw.ptr)
617
+ {
618
+ int x1src = rsrc.x1;
619
+ int x1dst = rdst.x1;
620
+ int len = rc.x2;
621
+ if(rw.x1 > x1src)
622
+ {
623
+ x1dst += rw.x1 - x1src;
624
+ len -= rw.x1 - x1src;
625
+ x1src = rw.x1;
626
+ }
627
+ if(len > 0)
628
+ {
629
+ if(x1src + len-1 > rw.x2)
630
+ {
631
+ len -= x1src + len - rw.x2 - 1;
632
+ }
633
+ if(len > 0)
634
+ {
635
+ m_ren->blend_from_color(src,
636
+ color,
637
+ x1dst, rdst.y1,
638
+ x1src, rsrc.y1,
639
+ len,
640
+ cover);
641
+ }
642
+ }
643
+ }
644
+ rdst.y1 += incy;
645
+ rsrc.y1 += incy;
646
+ --rc.y2;
647
+ }
648
+ }
649
+ }
650
+
651
+ //--------------------------------------------------------------------
652
+ template<class SrcPixelFormatRenderer>
653
+ void blend_from_lut(const SrcPixelFormatRenderer& src,
654
+ const color_type* color_lut,
655
+ const rect_i* rect_src_ptr = 0,
656
+ int dx = 0,
657
+ int dy = 0,
658
+ cover_type cover = agg::cover_full)
659
+ {
660
+ rect_i rsrc(0, 0, src.width(), src.height());
661
+ if(rect_src_ptr)
662
+ {
663
+ rsrc.x1 = rect_src_ptr->x1;
664
+ rsrc.y1 = rect_src_ptr->y1;
665
+ rsrc.x2 = rect_src_ptr->x2 + 1;
666
+ rsrc.y2 = rect_src_ptr->y2 + 1;
667
+ }
668
+
669
+ // Version with xdst, ydst (absolute positioning)
670
+ //rect_i rdst(xdst, ydst, xdst + rsrc.x2 - rsrc.x1, ydst + rsrc.y2 - rsrc.y1);
671
+
672
+ // Version with dx, dy (relative positioning)
673
+ rect_i rdst(rsrc.x1 + dx, rsrc.y1 + dy, rsrc.x2 + dx, rsrc.y2 + dy);
674
+ rect_i rc = clip_rect_area(rdst, rsrc, src.width(), src.height());
675
+
676
+ if(rc.x2 > 0)
677
+ {
678
+ int incy = 1;
679
+ if(rdst.y1 > rsrc.y1)
680
+ {
681
+ rsrc.y1 += rc.y2 - 1;
682
+ rdst.y1 += rc.y2 - 1;
683
+ incy = -1;
684
+ }
685
+ while(rc.y2 > 0)
686
+ {
687
+ typename SrcPixelFormatRenderer::row_data rw = src.row(rsrc.y1);
688
+ if(rw.ptr)
689
+ {
690
+ int x1src = rsrc.x1;
691
+ int x1dst = rdst.x1;
692
+ int len = rc.x2;
693
+ if(rw.x1 > x1src)
694
+ {
695
+ x1dst += rw.x1 - x1src;
696
+ len -= rw.x1 - x1src;
697
+ x1src = rw.x1;
698
+ }
699
+ if(len > 0)
700
+ {
701
+ if(x1src + len-1 > rw.x2)
702
+ {
703
+ len -= x1src + len - rw.x2 - 1;
704
+ }
705
+ if(len > 0)
706
+ {
707
+ m_ren->blend_from_lut(src,
708
+ color_lut,
709
+ x1dst, rdst.y1,
710
+ x1src, rsrc.y1,
711
+ len,
712
+ cover);
713
+ }
714
+ }
715
+ }
716
+ rdst.y1 += incy;
717
+ rsrc.y1 += incy;
718
+ --rc.y2;
719
+ }
720
+ }
721
+ }
722
+
723
+ private:
724
+ pixfmt_type* m_ren;
725
+ rect_i m_clip_box;
726
+ };
727
+
728
+
729
+ }
730
+
731
+ #endif
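Usage sketch (illustrative): renderer_base sits between a pixel format and the scanline renderers, adding the clip-box logic shown above. The pixel-format and buffer types come from agg_pixfmt_rgb.h and agg_rendering_buffer.h, which are part of this same commit; the exact typedef names are assumed from stock AGG 2.4.

    // Illustrative sketch: attach renderer_base to an RGB24 buffer, clear it,
    // then fill a clipped bar. clear() ignores the clip box; copy_bar() honours it.
    #include "agg_rendering_buffer.h"
    #include "agg_pixfmt_rgb.h"
    #include "agg_renderer_base.h"

    void fill_background(unsigned char* buf, unsigned w, unsigned h)
    {
        agg::rendering_buffer rbuf(buf, w, h, int(w * 3));   // tightly packed RGB24 rows
        agg::pixfmt_rgb24     pixf(rbuf);
        agg::renderer_base<agg::pixfmt_rgb24> rb(pixf);

        rb.clear(agg::rgba8(255, 255, 255));                 // whole buffer
        rb.clip_box(10, 10, int(w) - 11, int(h) - 11);
        rb.copy_bar(0, 0, int(w), int(h), agg::rgba8(200, 200, 200));  // clipped
    }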
data/bundled_deps/agg/agg/agg_renderer_scanline.h ADDED
@@ -0,0 +1,854 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+
16
+ #ifndef AGG_RENDERER_SCANLINE_INCLUDED
17
+ #define AGG_RENDERER_SCANLINE_INCLUDED
18
+
19
+ #include <limits>
20
+ #include <cstdlib>
21
+ #include "agg_basics.h"
22
+ #include "agg_renderer_base.h"
23
+
24
+ namespace agg
25
+ {
26
+
27
+ //================================================render_scanline_aa_solid
28
+ template<class Scanline, class BaseRenderer, class ColorT>
29
+ void render_scanline_aa_solid(const Scanline& sl,
30
+ BaseRenderer& ren,
31
+ const ColorT& color)
32
+ {
33
+ int y = sl.y();
34
+ unsigned num_spans = sl.num_spans();
35
+ typename Scanline::const_iterator span = sl.begin();
36
+
37
+ for(;;)
38
+ {
39
+ int x = span->x;
40
+ if(span->len > 0)
41
+ {
42
+ ren.blend_solid_hspan(x, y, (unsigned)span->len,
43
+ color,
44
+ span->covers);
45
+ }
46
+ else
47
+ {
48
+ ren.blend_hline(x, y, (unsigned)(x - span->len - 1),
49
+ color,
50
+ *(span->covers));
51
+ }
52
+ if(--num_spans == 0) break;
53
+ ++span;
54
+ }
55
+ }
56
+
57
+ //===============================================render_scanlines_aa_solid
58
+ template<class Rasterizer, class Scanline,
59
+ class BaseRenderer, class ColorT>
60
+ void render_scanlines_aa_solid(Rasterizer& ras, Scanline& sl,
61
+ BaseRenderer& ren, const ColorT& color)
62
+ {
63
+ if(ras.rewind_scanlines())
64
+ {
65
+ // Explicitly convert "color" to the BaseRenderer color type.
66
+ // For example, it can be called with color type "rgba", while
67
+ // "rgba8" is needed. Otherwise it will be implicitly
68
+ // converted in the loop many times.
69
+ //----------------------
70
+ typename BaseRenderer::color_type ren_color = color;
71
+
72
+ sl.reset(ras.min_x(), ras.max_x());
73
+ while(ras.sweep_scanline(sl))
74
+ {
75
+ //render_scanline_aa_solid(sl, ren, ren_color);
76
+
77
+ // This code is equivalent to the above call (copy/paste).
78
+ // It's just a "manual" optimization for old compilers,
79
+ // like Microsoft Visual C++ v6.0
80
+ //-------------------------------
81
+ int y = sl.y();
82
+ unsigned num_spans = sl.num_spans();
83
+ typename Scanline::const_iterator span = sl.begin();
84
+
85
+ for(;;)
86
+ {
87
+ int x = span->x;
88
+ if(span->len > 0)
89
+ {
90
+ ren.blend_solid_hspan(x, y, (unsigned)span->len,
91
+ ren_color,
92
+ span->covers);
93
+ }
94
+ else
95
+ {
96
+ ren.blend_hline(x, y, (unsigned)(x - span->len - 1),
97
+ ren_color,
98
+ *(span->covers));
99
+ }
100
+ if(--num_spans == 0) break;
101
+ ++span;
102
+ }
103
+ }
104
+ }
105
+ }
106
+
107
+ //==============================================renderer_scanline_aa_solid
108
+ template<class BaseRenderer> class renderer_scanline_aa_solid
109
+ {
110
+ public:
111
+ typedef BaseRenderer base_ren_type;
112
+ typedef typename base_ren_type::color_type color_type;
113
+
114
+ //--------------------------------------------------------------------
115
+ renderer_scanline_aa_solid() : m_ren(0) {}
116
+ explicit renderer_scanline_aa_solid(base_ren_type& ren) : m_ren(&ren) {}
117
+ void attach(base_ren_type& ren)
118
+ {
119
+ m_ren = &ren;
120
+ }
121
+
122
+ //--------------------------------------------------------------------
123
+ void color(const color_type& c) { m_color = c; }
124
+ const color_type& color() const { return m_color; }
125
+
126
+ //--------------------------------------------------------------------
127
+ void prepare() {}
128
+
129
+ //--------------------------------------------------------------------
130
+ template<class Scanline> void render(const Scanline& sl)
131
+ {
132
+ render_scanline_aa_solid(sl, *m_ren, m_color);
133
+ }
134
+
135
+ private:
136
+ base_ren_type* m_ren;
137
+ color_type m_color;
138
+ };
139
+
140
+
141
+
142
+
143
+
144
+
145
+
146
+
147
+
148
+
149
+
150
+
151
+
152
+ //======================================================render_scanline_aa
153
+ template<class Scanline, class BaseRenderer,
154
+ class SpanAllocator, class SpanGenerator>
155
+ void render_scanline_aa(const Scanline& sl, BaseRenderer& ren,
156
+ SpanAllocator& alloc, SpanGenerator& span_gen)
157
+ {
158
+ int y = sl.y();
159
+
160
+ unsigned num_spans = sl.num_spans();
161
+ typename Scanline::const_iterator span = sl.begin();
162
+ for(;;)
163
+ {
164
+ int x = span->x;
165
+ int len = span->len;
166
+ const typename Scanline::cover_type* covers = span->covers;
167
+
168
+ if(len < 0) len = -len;
169
+ typename BaseRenderer::color_type* colors = alloc.allocate(len);
170
+ span_gen.generate(colors, x, y, len);
171
+ ren.blend_color_hspan(x, y, len, colors,
172
+ (span->len < 0) ? 0 : covers, *covers);
173
+
174
+ if(--num_spans == 0) break;
175
+ ++span;
176
+ }
177
+ }
178
+
179
+ //=====================================================render_scanlines_aa
180
+ template<class Rasterizer, class Scanline, class BaseRenderer,
181
+ class SpanAllocator, class SpanGenerator>
182
+ void render_scanlines_aa(Rasterizer& ras, Scanline& sl, BaseRenderer& ren,
183
+ SpanAllocator& alloc, SpanGenerator& span_gen)
184
+ {
185
+ if(ras.rewind_scanlines())
186
+ {
187
+ sl.reset(ras.min_x(), ras.max_x());
188
+ span_gen.prepare();
189
+ while(ras.sweep_scanline(sl))
190
+ {
191
+ render_scanline_aa(sl, ren, alloc, span_gen);
192
+ }
193
+ }
194
+ }
195
+
196
+ //====================================================renderer_scanline_aa
197
+ template<class BaseRenderer, class SpanAllocator, class SpanGenerator>
198
+ class renderer_scanline_aa
199
+ {
200
+ public:
201
+ typedef BaseRenderer base_ren_type;
202
+ typedef SpanAllocator alloc_type;
203
+ typedef SpanGenerator span_gen_type;
204
+
205
+ //--------------------------------------------------------------------
206
+ renderer_scanline_aa() : m_ren(0), m_alloc(0), m_span_gen(0) {}
207
+ renderer_scanline_aa(base_ren_type& ren,
208
+ alloc_type& alloc,
209
+ span_gen_type& span_gen) :
210
+ m_ren(&ren),
211
+ m_alloc(&alloc),
212
+ m_span_gen(&span_gen)
213
+ {}
214
+ void attach(base_ren_type& ren,
215
+ alloc_type& alloc,
216
+ span_gen_type& span_gen)
217
+ {
218
+ m_ren = &ren;
219
+ m_alloc = &alloc;
220
+ m_span_gen = &span_gen;
221
+ }
222
+
223
+ //--------------------------------------------------------------------
224
+ void prepare() { m_span_gen->prepare(); }
225
+
226
+ //--------------------------------------------------------------------
227
+ template<class Scanline> void render(const Scanline& sl)
228
+ {
229
+ render_scanline_aa(sl, *m_ren, *m_alloc, *m_span_gen);
230
+ }
231
+
232
+ private:
233
+ base_ren_type* m_ren;
234
+ alloc_type* m_alloc;
235
+ span_gen_type* m_span_gen;
236
+ };
237
+
238
+
239
+
240
+
241
+
242
+
243
+ //===============================================render_scanline_bin_solid
244
+ template<class Scanline, class BaseRenderer, class ColorT>
245
+ void render_scanline_bin_solid(const Scanline& sl,
246
+ BaseRenderer& ren,
247
+ const ColorT& color)
248
+ {
249
+ unsigned num_spans = sl.num_spans();
250
+ typename Scanline::const_iterator span = sl.begin();
251
+ for(;;)
252
+ {
253
+ ren.blend_hline(span->x,
254
+ sl.y(),
255
+ span->x - 1 + ((span->len < 0) ?
256
+ -span->len :
257
+ span->len),
258
+ color,
259
+ cover_full);
260
+ if(--num_spans == 0) break;
261
+ ++span;
262
+ }
263
+ }
264
+
265
+ //==============================================render_scanlines_bin_solid
266
+ template<class Rasterizer, class Scanline,
267
+ class BaseRenderer, class ColorT>
268
+ void render_scanlines_bin_solid(Rasterizer& ras, Scanline& sl,
269
+ BaseRenderer& ren, const ColorT& color)
270
+ {
271
+ if(ras.rewind_scanlines())
272
+ {
273
+ // Explicitly convert "color" to the BaseRenderer color type.
274
+ // For example, it can be called with color type "rgba", while
275
+ // "rgba8" is needed. Otherwise it will be implicitly
276
+ // converted in the loop many times.
277
+ //----------------------
278
+ typename BaseRenderer::color_type ren_color(color);
279
+
280
+ sl.reset(ras.min_x(), ras.max_x());
281
+ while(ras.sweep_scanline(sl))
282
+ {
283
+ //render_scanline_bin_solid(sl, ren, ren_color);
284
+
285
+ // This code is equivalent to the above call (copy/paste).
286
+ // It's just a "manual" optimization for old compilers,
287
+ // like Microsoft Visual C++ v6.0
288
+ //-------------------------------
289
+ unsigned num_spans = sl.num_spans();
290
+ typename Scanline::const_iterator span = sl.begin();
291
+ for(;;)
292
+ {
293
+ ren.blend_hline(span->x,
294
+ sl.y(),
295
+ span->x - 1 + ((span->len < 0) ?
296
+ -span->len :
297
+ span->len),
298
+ ren_color,
299
+ cover_full);
300
+ if(--num_spans == 0) break;
301
+ ++span;
302
+ }
303
+ }
304
+ }
305
+ }
306
+
307
+ //=============================================renderer_scanline_bin_solid
308
+ template<class BaseRenderer> class renderer_scanline_bin_solid
309
+ {
310
+ public:
311
+ typedef BaseRenderer base_ren_type;
312
+ typedef typename base_ren_type::color_type color_type;
313
+
314
+ //--------------------------------------------------------------------
315
+ renderer_scanline_bin_solid() : m_ren(0) {}
316
+ explicit renderer_scanline_bin_solid(base_ren_type& ren) : m_ren(&ren) {}
317
+ void attach(base_ren_type& ren)
318
+ {
319
+ m_ren = &ren;
320
+ }
321
+
322
+ //--------------------------------------------------------------------
323
+ void color(const color_type& c) { m_color = c; }
324
+ const color_type& color() const { return m_color; }
325
+
326
+ //--------------------------------------------------------------------
327
+ void prepare() {}
328
+
329
+ //--------------------------------------------------------------------
330
+ template<class Scanline> void render(const Scanline& sl)
331
+ {
332
+ render_scanline_bin_solid(sl, *m_ren, m_color);
333
+ }
334
+
335
+ private:
336
+ base_ren_type* m_ren;
337
+ color_type m_color;
338
+ };
339
+
340
+
341
+
342
+
343
+
344
+
345
+
346
+
347
+ //======================================================render_scanline_bin
348
+ template<class Scanline, class BaseRenderer,
349
+ class SpanAllocator, class SpanGenerator>
350
+ void render_scanline_bin(const Scanline& sl, BaseRenderer& ren,
351
+ SpanAllocator& alloc, SpanGenerator& span_gen)
352
+ {
353
+ int y = sl.y();
354
+
355
+ unsigned num_spans = sl.num_spans();
356
+ typename Scanline::const_iterator span = sl.begin();
357
+ for(;;)
358
+ {
359
+ int x = span->x;
360
+ int len = span->len;
361
+ if(len < 0) len = -len;
362
+ typename BaseRenderer::color_type* colors = alloc.allocate(len);
363
+ span_gen.generate(colors, x, y, len);
364
+ ren.blend_color_hspan(x, y, len, colors, 0, cover_full);
365
+ if(--num_spans == 0) break;
366
+ ++span;
367
+ }
368
+ }
369
+
370
+ //=====================================================render_scanlines_bin
371
+ template<class Rasterizer, class Scanline, class BaseRenderer,
372
+ class SpanAllocator, class SpanGenerator>
373
+ void render_scanlines_bin(Rasterizer& ras, Scanline& sl, BaseRenderer& ren,
374
+ SpanAllocator& alloc, SpanGenerator& span_gen)
375
+ {
376
+ if(ras.rewind_scanlines())
377
+ {
378
+ sl.reset(ras.min_x(), ras.max_x());
379
+ span_gen.prepare();
380
+ while(ras.sweep_scanline(sl))
381
+ {
382
+ render_scanline_bin(sl, ren, alloc, span_gen);
383
+ }
384
+ }
385
+ }
386
+
387
+ //====================================================renderer_scanline_bin
388
+ template<class BaseRenderer, class SpanAllocator, class SpanGenerator>
389
+ class renderer_scanline_bin
390
+ {
391
+ public:
392
+ typedef BaseRenderer base_ren_type;
393
+ typedef SpanAllocator alloc_type;
394
+ typedef SpanGenerator span_gen_type;
395
+
396
+ //--------------------------------------------------------------------
397
+ renderer_scanline_bin() : m_ren(0), m_alloc(0), m_span_gen(0) {}
398
+ renderer_scanline_bin(base_ren_type& ren,
399
+ alloc_type& alloc,
400
+ span_gen_type& span_gen) :
401
+ m_ren(&ren),
402
+ m_alloc(&alloc),
403
+ m_span_gen(&span_gen)
404
+ {}
405
+ void attach(base_ren_type& ren,
406
+ alloc_type& alloc,
407
+ span_gen_type& span_gen)
408
+ {
409
+ m_ren = &ren;
410
+ m_alloc = &alloc;
411
+ m_span_gen = &span_gen;
412
+ }
413
+
414
+ //--------------------------------------------------------------------
415
+ void prepare() { m_span_gen->prepare(); }
416
+
417
+ //--------------------------------------------------------------------
418
+ template<class Scanline> void render(const Scanline& sl)
419
+ {
420
+ render_scanline_bin(sl, *m_ren, *m_alloc, *m_span_gen);
421
+ }
422
+
423
+ private:
424
+ base_ren_type* m_ren;
425
+ alloc_type* m_alloc;
426
+ span_gen_type* m_span_gen;
427
+ };
428
+
429
+
430
+
431
+
432
+
433
+
434
+
435
+
436
+
437
+
438
+ //========================================================render_scanlines
439
+ template<class Rasterizer, class Scanline, class Renderer>
440
+ void render_scanlines(Rasterizer& ras, Scanline& sl, Renderer& ren)
441
+ {
442
+ if(ras.rewind_scanlines())
443
+ {
444
+ sl.reset(ras.min_x(), ras.max_x());
445
+ ren.prepare();
446
+ while(ras.sweep_scanline(sl))
447
+ {
448
+ ren.render(sl);
449
+ }
450
+ }
451
+ }
452
+
453
+ //========================================================render_all_paths
454
+ template<class Rasterizer, class Scanline, class Renderer,
455
+ class VertexSource, class ColorStorage, class PathId>
456
+ void render_all_paths(Rasterizer& ras,
457
+ Scanline& sl,
458
+ Renderer& r,
459
+ VertexSource& vs,
460
+ const ColorStorage& as,
461
+ const PathId& path_id,
462
+ unsigned num_paths)
463
+ {
464
+ for(unsigned i = 0; i < num_paths; i++)
465
+ {
466
+ ras.reset();
467
+ ras.add_path(vs, path_id[i]);
468
+ r.color(as[i]);
469
+ render_scanlines(ras, sl, r);
470
+ }
471
+ }
472
+
473
+
474
+
475
+
476
+
477
+
478
+ //=============================================render_scanlines_compound
479
+ template<class Rasterizer,
480
+ class ScanlineAA,
481
+ class ScanlineBin,
482
+ class BaseRenderer,
483
+ class SpanAllocator,
484
+ class StyleHandler>
485
+ void render_scanlines_compound(Rasterizer& ras,
486
+ ScanlineAA& sl_aa,
487
+ ScanlineBin& sl_bin,
488
+ BaseRenderer& ren,
489
+ SpanAllocator& alloc,
490
+ StyleHandler& sh)
491
+ {
492
+ if(ras.rewind_scanlines())
493
+ {
494
+ int min_x = ras.min_x();
495
+ int len = ras.max_x() - min_x + 2;
496
+ sl_aa.reset(min_x, ras.max_x());
497
+ sl_bin.reset(min_x, ras.max_x());
498
+
499
+ typedef typename BaseRenderer::color_type color_type;
500
+ color_type* color_span = alloc.allocate(len * 2);
501
+ color_type* mix_buffer = color_span + len;
502
+ unsigned num_spans;
503
+
504
+ unsigned num_styles;
505
+ unsigned style;
506
+ bool solid;
507
+ while((num_styles = ras.sweep_styles()) > 0)
508
+ {
509
+ typename ScanlineAA::const_iterator span_aa;
510
+ if(num_styles == 1)
511
+ {
512
+ // Optimization for a single style. Happens often
513
+ //-------------------------
514
+ if(ras.sweep_scanline(sl_aa, 0))
515
+ {
516
+ style = ras.style(0);
517
+ if(sh.is_solid(style))
518
+ {
519
+ // Just solid fill
520
+ //-----------------------
521
+ render_scanline_aa_solid(sl_aa, ren, sh.color(style));
522
+ }
523
+ else
524
+ {
525
+ // Arbitrary span generator
526
+ //-----------------------
527
+ span_aa = sl_aa.begin();
528
+ num_spans = sl_aa.num_spans();
529
+ for(;;)
530
+ {
531
+ len = span_aa->len;
532
+ sh.generate_span(color_span,
533
+ span_aa->x,
534
+ sl_aa.y(),
535
+ len,
536
+ style);
537
+
538
+ ren.blend_color_hspan(span_aa->x,
539
+ sl_aa.y(),
540
+ span_aa->len,
541
+ color_span,
542
+ span_aa->covers);
543
+ if(--num_spans == 0) break;
544
+ ++span_aa;
545
+ }
546
+ }
547
+ }
548
+ }
549
+ else
550
+ {
551
+ if(ras.sweep_scanline(sl_bin, -1))
552
+ {
553
+ // Clear the spans of the mix_buffer
554
+ //--------------------
555
+ typename ScanlineBin::const_iterator span_bin = sl_bin.begin();
556
+ num_spans = sl_bin.num_spans();
557
+ for(;;)
558
+ {
559
+ memset(mix_buffer + span_bin->x - min_x,
560
+ 0,
561
+ span_bin->len * sizeof(color_type));
562
+
563
+ if(--num_spans == 0) break;
564
+ ++span_bin;
565
+ }
566
+
567
+ unsigned i;
568
+ for(i = 0; i < num_styles; i++)
569
+ {
570
+ style = ras.style(i);
571
+ solid = sh.is_solid(style);
572
+
573
+ if(ras.sweep_scanline(sl_aa, i))
574
+ {
575
+ color_type* colors;
576
+ color_type* cspan;
577
+ typename ScanlineAA::cover_type* covers;
578
+ span_aa = sl_aa.begin();
579
+ num_spans = sl_aa.num_spans();
580
+ if(solid)
581
+ {
582
+ // Just solid fill
583
+ //-----------------------
584
+ for(;;)
585
+ {
586
+ color_type c = sh.color(style);
587
+ len = span_aa->len;
588
+ colors = mix_buffer + span_aa->x - min_x;
589
+ covers = span_aa->covers;
590
+ do
591
+ {
592
+ if(*covers == cover_full)
593
+ {
594
+ *colors = c;
595
+ }
596
+ else
597
+ {
598
+ colors->add(c, *covers);
599
+ }
600
+ ++colors;
601
+ ++covers;
602
+ }
603
+ while(--len);
604
+ if(--num_spans == 0) break;
605
+ ++span_aa;
606
+ }
607
+ }
608
+ else
609
+ {
610
+ // Arbitrary span generator
611
+ //-----------------------
612
+ for(;;)
613
+ {
614
+ len = span_aa->len;
615
+ colors = mix_buffer + span_aa->x - min_x;
616
+ cspan = color_span;
617
+ sh.generate_span(cspan,
618
+ span_aa->x,
619
+ sl_aa.y(),
620
+ len,
621
+ style);
622
+ covers = span_aa->covers;
623
+ do
624
+ {
625
+ if(*covers == cover_full)
626
+ {
627
+ *colors = *cspan;
628
+ }
629
+ else
630
+ {
631
+ colors->add(*cspan, *covers);
632
+ }
633
+ ++cspan;
634
+ ++colors;
635
+ ++covers;
636
+ }
637
+ while(--len);
638
+ if(--num_spans == 0) break;
639
+ ++span_aa;
640
+ }
641
+ }
642
+ }
643
+ }
644
+
645
+ // Emit the blended result as a color hspan
646
+ //-------------------------
647
+ span_bin = sl_bin.begin();
648
+ num_spans = sl_bin.num_spans();
649
+ for(;;)
650
+ {
651
+ ren.blend_color_hspan(span_bin->x,
652
+ sl_bin.y(),
653
+ span_bin->len,
654
+ mix_buffer + span_bin->x - min_x,
655
+ 0,
656
+ cover_full);
657
+ if(--num_spans == 0) break;
658
+ ++span_bin;
659
+ }
660
+ } // if(ras.sweep_scanline(sl_bin, -1))
661
+ } // if(num_styles == 1) ... else
662
+ } // while((num_styles = ras.sweep_styles()) > 0)
663
+ } // if(ras.rewind_scanlines())
664
+ }
665
+
666
+ //=======================================render_scanlines_compound_layered
667
+ template<class Rasterizer,
668
+ class ScanlineAA,
669
+ class BaseRenderer,
670
+ class SpanAllocator,
671
+ class StyleHandler>
672
+ void render_scanlines_compound_layered(Rasterizer& ras,
673
+ ScanlineAA& sl_aa,
674
+ BaseRenderer& ren,
675
+ SpanAllocator& alloc,
676
+ StyleHandler& sh)
677
+ {
678
+ if(ras.rewind_scanlines())
679
+ {
680
+ int min_x = ras.min_x();
681
+ int len = ras.max_x() - min_x + 2;
682
+ sl_aa.reset(min_x, ras.max_x());
683
+
684
+ typedef typename BaseRenderer::color_type color_type;
685
+ color_type* color_span = alloc.allocate(len * 2);
686
+ color_type* mix_buffer = color_span + len;
687
+ cover_type* cover_buffer = ras.allocate_cover_buffer(len);
688
+ unsigned num_spans;
689
+
690
+ unsigned num_styles;
691
+ unsigned style;
692
+ bool solid;
693
+ while((num_styles = ras.sweep_styles()) > 0)
694
+ {
695
+ typename ScanlineAA::const_iterator span_aa;
696
+ if(num_styles == 1)
697
+ {
698
+ // Optimization for a single style. Happens often
699
+ //-------------------------
700
+ if(ras.sweep_scanline(sl_aa, 0))
701
+ {
702
+ style = ras.style(0);
703
+ if(sh.is_solid(style))
704
+ {
705
+ // Just solid fill
706
+ //-----------------------
707
+ render_scanline_aa_solid(sl_aa, ren, sh.color(style));
708
+ }
709
+ else
710
+ {
711
+ // Arbitrary span generator
712
+ //-----------------------
713
+ span_aa = sl_aa.begin();
714
+ num_spans = sl_aa.num_spans();
715
+ for(;;)
716
+ {
717
+ len = span_aa->len;
718
+ sh.generate_span(color_span,
719
+ span_aa->x,
720
+ sl_aa.y(),
721
+ len,
722
+ style);
723
+
724
+ ren.blend_color_hspan(span_aa->x,
725
+ sl_aa.y(),
726
+ span_aa->len,
727
+ color_span,
728
+ span_aa->covers);
729
+ if(--num_spans == 0) break;
730
+ ++span_aa;
731
+ }
732
+ }
733
+ }
734
+ }
735
+ else
736
+ {
737
+ int sl_start = ras.scanline_start();
738
+ unsigned sl_len = ras.scanline_length();
739
+
740
+ if(sl_len)
741
+ {
742
+ memset(mix_buffer + sl_start - min_x,
743
+ 0,
744
+ sl_len * sizeof(color_type));
745
+
746
+ memset(cover_buffer + sl_start - min_x,
747
+ 0,
748
+ sl_len * sizeof(cover_type));
749
+
750
+ int sl_y = std::numeric_limits<int>::max();
751
+ unsigned i;
752
+ for(i = 0; i < num_styles; i++)
753
+ {
754
+ style = ras.style(i);
755
+ solid = sh.is_solid(style);
756
+
757
+ if(ras.sweep_scanline(sl_aa, i))
758
+ {
759
+ unsigned cover;
760
+ color_type* colors;
761
+ color_type* cspan;
762
+ cover_type* src_covers;
763
+ cover_type* dst_covers;
764
+ span_aa = sl_aa.begin();
765
+ num_spans = sl_aa.num_spans();
766
+ sl_y = sl_aa.y();
767
+ if(solid)
768
+ {
769
+ // Just solid fill
770
+ //-----------------------
771
+ for(;;)
772
+ {
773
+ color_type c = sh.color(style);
774
+ len = span_aa->len;
775
+ colors = mix_buffer + span_aa->x - min_x;
776
+ src_covers = span_aa->covers;
777
+ dst_covers = cover_buffer + span_aa->x - min_x;
778
+ do
779
+ {
780
+ cover = *src_covers;
781
+ if(*dst_covers + cover > cover_full)
782
+ {
783
+ cover = cover_full - *dst_covers;
784
+ }
785
+ if(cover)
786
+ {
787
+ colors->add(c, cover);
788
+ *dst_covers += cover;
789
+ }
790
+ ++colors;
791
+ ++src_covers;
792
+ ++dst_covers;
793
+ }
794
+ while(--len);
795
+ if(--num_spans == 0) break;
796
+ ++span_aa;
797
+ }
798
+ }
799
+ else
800
+ {
801
+ // Arbitrary span generator
802
+ //-----------------------
803
+ for(;;)
804
+ {
805
+ len = span_aa->len;
806
+ colors = mix_buffer + span_aa->x - min_x;
807
+ cspan = color_span;
808
+ sh.generate_span(cspan,
809
+ span_aa->x,
810
+ sl_aa.y(),
811
+ len,
812
+ style);
813
+ src_covers = span_aa->covers;
814
+ dst_covers = cover_buffer + span_aa->x - min_x;
815
+ do
816
+ {
817
+ cover = *src_covers;
818
+ if(*dst_covers + cover > cover_full)
819
+ {
820
+ cover = cover_full - *dst_covers;
821
+ }
822
+ if(cover)
823
+ {
824
+ colors->add(*cspan, cover);
825
+ *dst_covers += cover;
826
+ }
827
+ ++cspan;
828
+ ++colors;
829
+ ++src_covers;
830
+ ++dst_covers;
831
+ }
832
+ while(--len);
833
+ if(--num_spans == 0) break;
834
+ ++span_aa;
835
+ }
836
+ }
837
+ }
838
+ }
839
+ ren.blend_color_hspan(sl_start,
840
+ sl_y,
841
+ sl_len,
842
+ mix_buffer + sl_start - min_x,
843
+ 0,
844
+ cover_full);
845
+ } //if(sl_len)
846
+ } //if(num_styles == 1) ... else
847
+ } //while((num_styles = ras.sweep_styles()) > 0)
848
+ } //if(ras.rewind_scanlines())
849
+ }
850
+
851
+
852
+ }
853
+
854
+ #endif
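Usage sketch (illustrative): the free functions above are the glue of the usual AGG solid-fill pipeline. This sketch assumes the bundled agg_scanline_p.h and agg_rasterizer_scanline_aa.h from this commit and the standard AGG 2.4 names; it is not taken from the diff itself.

    // Illustrative sketch: rasterize a triangle and blend it as a solid color.
    #include "agg_rendering_buffer.h"
    #include "agg_pixfmt_rgb.h"
    #include "agg_renderer_base.h"
    #include "agg_renderer_scanline.h"
    #include "agg_rasterizer_scanline_aa.h"
    #include "agg_scanline_p.h"

    void render_triangle(unsigned char* buf, unsigned w, unsigned h)
    {
        agg::rendering_buffer rbuf(buf, w, h, int(w * 3));
        agg::pixfmt_rgb24     pixf(rbuf);
        agg::renderer_base<agg::pixfmt_rgb24> rb(pixf);

        agg::rasterizer_scanline_aa<> ras;
        agg::scanline_p8              sl;
        ras.move_to_d(20.0, 20.0);
        ras.line_to_d(200.0, 40.0);
        ras.line_to_d(100.0, 180.0);
        ras.close_polygon();

        // Sweeps the rasterizer's scanlines and blends a single solid color
        // through renderer_base, which applies the clip box.
        agg::render_scanlines_aa_solid(ras, sl, rb, agg::rgba8(0, 0, 0));
    }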
data/bundled_deps/agg/agg/agg_rendering_buffer.h ADDED
@@ -0,0 +1,300 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ //
16
+ // class rendering_buffer
17
+ //
18
+ //----------------------------------------------------------------------------
19
+
20
+ #ifndef AGG_RENDERING_BUFFER_INCLUDED
21
+ #define AGG_RENDERING_BUFFER_INCLUDED
22
+
23
+ #include "agg_array.h"
24
+
25
+ namespace agg
26
+ {
27
+
28
+ //===========================================================row_accessor
29
+ template<class T> class row_accessor
30
+ {
31
+ public:
32
+ typedef const_row_info<T> row_data;
33
+
34
+ //-------------------------------------------------------------------
35
+ row_accessor() :
36
+ m_buf(0),
37
+ m_start(0),
38
+ m_width(0),
39
+ m_height(0),
40
+ m_stride(0)
41
+ {
42
+ }
43
+
44
+ //--------------------------------------------------------------------
45
+ row_accessor(T* buf, unsigned width, unsigned height, int stride) :
46
+ m_buf(0),
47
+ m_start(0),
48
+ m_width(0),
49
+ m_height(0),
50
+ m_stride(0)
51
+ {
52
+ attach(buf, width, height, stride);
53
+ }
54
+
55
+
56
+ //--------------------------------------------------------------------
57
+ void attach(T* buf, unsigned width, unsigned height, int stride)
58
+ {
59
+ m_buf = m_start = buf;
60
+ m_width = width;
61
+ m_height = height;
62
+ m_stride = stride;
63
+ if(stride < 0)
64
+ {
65
+ m_start = m_buf - int(height - 1) * stride;
66
+ }
67
+ }
68
+
69
+ //--------------------------------------------------------------------
70
+ AGG_INLINE T* buf() { return m_buf; }
71
+ AGG_INLINE const T* buf() const { return m_buf; }
72
+ AGG_INLINE unsigned width() const { return m_width; }
73
+ AGG_INLINE unsigned height() const { return m_height; }
74
+ AGG_INLINE int stride() const { return m_stride; }
75
+ AGG_INLINE unsigned stride_abs() const
76
+ {
77
+ return (m_stride < 0) ? unsigned(-m_stride) : unsigned(m_stride);
78
+ }
79
+
80
+ //--------------------------------------------------------------------
81
+ AGG_INLINE T* row_ptr(int, int y, unsigned)
82
+ {
83
+ return m_start + y * m_stride;
84
+ }
85
+ AGG_INLINE T* row_ptr(int y) { return m_start + y * m_stride; }
86
+ AGG_INLINE const T* row_ptr(int y) const { return m_start + y * m_stride; }
87
+ AGG_INLINE row_data row (int y) const
88
+ {
89
+ return row_data(0, m_width-1, row_ptr(y));
90
+ }
91
+
92
+ //--------------------------------------------------------------------
93
+ template<class RenBuf>
94
+ void copy_from(const RenBuf& src)
95
+ {
96
+ unsigned h = height();
97
+ if(src.height() < h) h = src.height();
98
+
99
+ unsigned l = stride_abs();
100
+ if(src.stride_abs() < l) l = src.stride_abs();
101
+
102
+ l *= sizeof(T);
103
+
104
+ unsigned y;
105
+ unsigned w = width();
106
+ for (y = 0; y < h; y++)
107
+ {
108
+ memcpy(row_ptr(0, y, w), src.row_ptr(y), l);
109
+ }
110
+ }
111
+
112
+ //--------------------------------------------------------------------
113
+ void clear(T value)
114
+ {
115
+ unsigned y;
116
+ unsigned w = width();
117
+ unsigned stride = stride_abs();
118
+ for(y = 0; y < height(); y++)
119
+ {
120
+ T* p = row_ptr(0, y, w);
121
+ unsigned x;
122
+ for(x = 0; x < stride; x++)
123
+ {
124
+ *p++ = value;
125
+ }
126
+ }
127
+ }
128
+
129
+ private:
130
+ //--------------------------------------------------------------------
131
+ T* m_buf; // Pointer to rendering buffer
132
+ T* m_start; // Pointer to first pixel depending on stride
133
+ unsigned m_width; // Width in pixels
134
+ unsigned m_height; // Height in pixels
135
+ int m_stride; // Number of bytes per row. Can be < 0
136
+ };
137
+
138
+
139
+
140
+
141
+ //==========================================================row_ptr_cache
142
+ template<class T> class row_ptr_cache
143
+ {
144
+ public:
145
+ typedef const_row_info<T> row_data;
146
+
147
+ //-------------------------------------------------------------------
148
+ row_ptr_cache() :
149
+ m_buf(0),
150
+ m_rows(),
151
+ m_width(0),
152
+ m_height(0),
153
+ m_stride(0)
154
+ {
155
+ }
156
+
157
+ //--------------------------------------------------------------------
158
+ row_ptr_cache(T* buf, unsigned width, unsigned height, int stride) :
159
+ m_buf(0),
160
+ m_rows(),
161
+ m_width(0),
162
+ m_height(0),
163
+ m_stride(0)
164
+ {
165
+ attach(buf, width, height, stride);
166
+ }
167
+
168
+ //--------------------------------------------------------------------
169
+ void attach(T* buf, unsigned width, unsigned height, int stride)
170
+ {
171
+ m_buf = buf;
172
+ m_width = width;
173
+ m_height = height;
174
+ m_stride = stride;
175
+ if(height > m_rows.size())
176
+ {
177
+ m_rows.resize(height);
178
+ }
179
+
180
+ T* row_ptr = m_buf;
181
+
182
+ if(stride < 0)
183
+ {
184
+ row_ptr = m_buf - int(height - 1) * stride;
185
+ }
186
+
187
+ T** rows = &m_rows[0];
188
+
189
+ while(height--)
190
+ {
191
+ *rows++ = row_ptr;
192
+ row_ptr += stride;
193
+ }
194
+ }
195
+
196
+ //--------------------------------------------------------------------
197
+ AGG_INLINE T* buf() { return m_buf; }
198
+ AGG_INLINE const T* buf() const { return m_buf; }
199
+ AGG_INLINE unsigned width() const { return m_width; }
200
+ AGG_INLINE unsigned height() const { return m_height; }
201
+ AGG_INLINE int stride() const { return m_stride; }
202
+ AGG_INLINE unsigned stride_abs() const
203
+ {
204
+ return (m_stride < 0) ? unsigned(-m_stride) : unsigned(m_stride);
205
+ }
206
+
207
+ //--------------------------------------------------------------------
208
+ AGG_INLINE T* row_ptr(int, int y, unsigned)
209
+ {
210
+ return m_rows[y];
211
+ }
212
+ AGG_INLINE T* row_ptr(int y) { return m_rows[y]; }
213
+ AGG_INLINE const T* row_ptr(int y) const { return m_rows[y]; }
214
+ AGG_INLINE row_data row (int y) const
215
+ {
216
+ return row_data(0, m_width-1, m_rows[y]);
217
+ }
218
+
219
+ //--------------------------------------------------------------------
220
+ T const* const* rows() const { return &m_rows[0]; }
221
+
222
+ //--------------------------------------------------------------------
223
+ template<class RenBuf>
224
+ void copy_from(const RenBuf& src)
225
+ {
226
+ unsigned h = height();
227
+ if(src.height() < h) h = src.height();
228
+
229
+ unsigned l = stride_abs();
230
+ if(src.stride_abs() < l) l = src.stride_abs();
231
+
232
+ l *= sizeof(T);
233
+
234
+ unsigned y;
235
+ unsigned w = width();
236
+ for (y = 0; y < h; y++)
237
+ {
238
+ memcpy(row_ptr(0, y, w), src.row_ptr(y), l);
239
+ }
240
+ }
241
+
242
+ //--------------------------------------------------------------------
243
+ void clear(T value)
244
+ {
245
+ unsigned y;
246
+ unsigned w = width();
247
+ unsigned stride = stride_abs();
248
+ for(y = 0; y < height(); y++)
249
+ {
250
+ T* p = row_ptr(0, y, w);
251
+ unsigned x;
252
+ for(x = 0; x < stride; x++)
253
+ {
254
+ *p++ = value;
255
+ }
256
+ }
257
+ }
258
+
259
+ private:
260
+ //--------------------------------------------------------------------
261
+ T* m_buf; // Pointer to rendering buffer
262
+ pod_array<T*> m_rows; // Pointers to each row of the buffer
263
+ unsigned m_width; // Width in pixels
264
+ unsigned m_height; // Height in pixels
265
+ int m_stride; // Number of bytes per row. Can be < 0
266
+ };
267
+
268
+
269
+
270
+
271
+ //========================================================rendering_buffer
272
+ //
273
+ // The definition of the main type for accessing the rows in the frame
274
+ // buffer. It provides functionality to navigate to the rows in a
275
+ // rectangular matrix, from top to bottom or from bottom to top depending
276
+ // on stride.
277
+ //
278
+ // row_accessor is cheap to create/destroy, but performs one multiplication
279
+ // when calling row_ptr().
280
+ //
281
+ // row_ptr_cache creates an array of pointers to rows, so, the access
282
+ // via row_ptr() may be faster. But it requires memory allocation
283
+ // when creating. For example, on typical Intel Pentium hardware
284
+ // row_ptr_cache speeds span_image_filter_rgb_nn up to 10%
285
+ //
286
+ // It's used only in short hand typedefs like pixfmt_rgba32 and can be
287
+ // redefined in agg_config.h
288
+ // In real applications you can use both, depending on your needs
289
+ //------------------------------------------------------------------------
290
+ #ifdef AGG_RENDERING_BUFFER
291
+ typedef AGG_RENDERING_BUFFER rendering_buffer;
292
+ #else
293
+ // typedef row_ptr_cache<int8u> rendering_buffer;
294
+ typedef row_accessor<int8u> rendering_buffer;
295
+ #endif
296
+
297
+ }
298
+
299
+
300
+ #endif
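The rendering_buffer added above is a thin row accessor over caller-owned memory. As a minimal sketch (not upstream code), this shows attaching it to a pixel array and touching a row; the dimensions, bytes-per-pixel, and fill value are illustrative assumptions.

#include "agg_basics.h"
#include "agg_rendering_buffer.h"
#include <vector>

int main()
{
    const unsigned width = 320, height = 200, bpp = 3;      // assumed RGB24 layout
    std::vector<agg::int8u> pixels(width * height * bpp);

    // Positive stride walks rows top to bottom; a negative stride flips the image.
    agg::rendering_buffer rbuf(pixels.data(), width, height, int(width * bpp));

    rbuf.clear(255);                      // fill every byte of every row
    agg::int8u* row0 = rbuf.row_ptr(0);   // row_accessor: one multiply per call
    (void)row0;
    return 0;
}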
data/bundled_deps/agg/agg/agg_scanline_p.h ADDED
@@ -0,0 +1,329 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ //
16
+ // Class scanline_p - a general purpose scanline container with packed spans.
17
+ //
18
+ //----------------------------------------------------------------------------
19
+ //
20
+ // Adaptation for 32-bit screen coordinates (scanline32_p) has been sponsored by
21
+ // Liberty Technology Systems, Inc., visit http://lib-sys.com
22
+ //
23
+ // Liberty Technology Systems, Inc. is the provider of
24
+ // PostScript and PDF technology for software developers.
25
+ //
26
+ //----------------------------------------------------------------------------
27
+ #ifndef AGG_SCANLINE_P_INCLUDED
28
+ #define AGG_SCANLINE_P_INCLUDED
29
+
30
+ #include "agg_array.h"
31
+
32
+ namespace agg
33
+ {
34
+
35
+ //=============================================================scanline_p8
36
+ //
37
+ // This is a general purpose scanline container which supports the interface
38
+ // used in the rasterizer::render(). See description of scanline_u8
39
+ // for details.
40
+ //
41
+ //------------------------------------------------------------------------
42
+ class scanline_p8
43
+ {
44
+ public:
45
+ typedef scanline_p8 self_type;
46
+ typedef int8u cover_type;
47
+ typedef int16 coord_type;
48
+
49
+ //--------------------------------------------------------------------
50
+ struct span
51
+ {
52
+ coord_type x;
53
+ coord_type len; // If negative, it's a solid span, covers is valid
54
+ const cover_type* covers;
55
+ };
56
+
57
+ typedef span* iterator;
58
+ typedef const span* const_iterator;
59
+
60
+ scanline_p8() :
61
+ m_last_x(0x7FFFFFF0),
62
+ m_covers(),
63
+ m_cover_ptr(0),
64
+ m_spans(),
65
+ m_cur_span(0)
66
+ {
67
+ }
68
+
69
+ //--------------------------------------------------------------------
70
+ void reset(int min_x, int max_x)
71
+ {
72
+ unsigned max_len = max_x - min_x + 3;
73
+ if(max_len > m_spans.size())
74
+ {
75
+ m_spans.resize(max_len);
76
+ m_covers.resize(max_len);
77
+ }
78
+ m_last_x = 0x7FFFFFF0;
79
+ m_cover_ptr = &m_covers[0];
80
+ m_cur_span = &m_spans[0];
81
+ m_cur_span->len = 0;
82
+ }
83
+
84
+ //--------------------------------------------------------------------
85
+ void add_cell(int x, unsigned cover)
86
+ {
87
+ *m_cover_ptr = (cover_type)cover;
88
+ if(x == m_last_x+1 && m_cur_span->len > 0)
89
+ {
90
+ m_cur_span->len++;
91
+ }
92
+ else
93
+ {
94
+ m_cur_span++;
95
+ m_cur_span->covers = m_cover_ptr;
96
+ m_cur_span->x = (int16)x;
97
+ m_cur_span->len = 1;
98
+ }
99
+ m_last_x = x;
100
+ m_cover_ptr++;
101
+ }
102
+
103
+ //--------------------------------------------------------------------
104
+ void add_cells(int x, unsigned len, const cover_type* covers)
105
+ {
106
+ memcpy(m_cover_ptr, covers, len * sizeof(cover_type));
107
+ if(x == m_last_x+1 && m_cur_span->len > 0)
108
+ {
109
+ m_cur_span->len += (int16)len;
110
+ }
111
+ else
112
+ {
113
+ m_cur_span++;
114
+ m_cur_span->covers = m_cover_ptr;
115
+ m_cur_span->x = (int16)x;
116
+ m_cur_span->len = (int16)len;
117
+ }
118
+ m_cover_ptr += len;
119
+ m_last_x = x + len - 1;
120
+ }
121
+
122
+ //--------------------------------------------------------------------
123
+ void add_span(int x, unsigned len, unsigned cover)
124
+ {
125
+ if(x == m_last_x+1 &&
126
+ m_cur_span->len < 0 &&
127
+ cover == *m_cur_span->covers)
128
+ {
129
+ m_cur_span->len -= (int16)len;
130
+ }
131
+ else
132
+ {
133
+ *m_cover_ptr = (cover_type)cover;
134
+ m_cur_span++;
135
+ m_cur_span->covers = m_cover_ptr++;
136
+ m_cur_span->x = (int16)x;
137
+ m_cur_span->len = (int16)(-int(len));
138
+ }
139
+ m_last_x = x + len - 1;
140
+ }
141
+
142
+ //--------------------------------------------------------------------
143
+ void finalize(int y)
144
+ {
145
+ m_y = y;
146
+ }
147
+
148
+ //--------------------------------------------------------------------
149
+ void reset_spans()
150
+ {
151
+ m_last_x = 0x7FFFFFF0;
152
+ m_cover_ptr = &m_covers[0];
153
+ m_cur_span = &m_spans[0];
154
+ m_cur_span->len = 0;
155
+ }
156
+
157
+ //--------------------------------------------------------------------
158
+ int y() const { return m_y; }
159
+ unsigned num_spans() const { return unsigned(m_cur_span - &m_spans[0]); }
160
+ const_iterator begin() const { return &m_spans[1]; }
161
+
162
+ private:
163
+ scanline_p8(const self_type&);
164
+ const self_type& operator = (const self_type&);
165
+
166
+ int m_last_x;
167
+ int m_y;
168
+ pod_array<cover_type> m_covers;
169
+ cover_type* m_cover_ptr;
170
+ pod_array<span> m_spans;
171
+ span* m_cur_span;
172
+ };
173
+
174
+
175
+
176
+
177
+
178
+
179
+
180
+
181
+ //==========================================================scanline32_p8
182
+ class scanline32_p8
183
+ {
184
+ public:
185
+ typedef scanline32_p8 self_type;
186
+ typedef int8u cover_type;
187
+ typedef int32 coord_type;
188
+
189
+ struct span
190
+ {
191
+ span() {}
192
+ span(coord_type x_, coord_type len_, const cover_type* covers_) :
193
+ x(x_), len(len_), covers(covers_) {}
194
+
195
+ coord_type x;
196
+ coord_type len; // If negative, it's a solid span, covers is valid
197
+ const cover_type* covers;
198
+ };
199
+ typedef pod_bvector<span, 4> span_array_type;
200
+
201
+
202
+ //--------------------------------------------------------------------
203
+ class const_iterator
204
+ {
205
+ public:
206
+ const_iterator(const span_array_type& spans) :
207
+ m_spans(spans),
208
+ m_span_idx(0)
209
+ {}
210
+
211
+ const span& operator*() const { return m_spans[m_span_idx]; }
212
+ const span* operator->() const { return &m_spans[m_span_idx]; }
213
+
214
+ void operator ++ () { ++m_span_idx; }
215
+
216
+ private:
217
+ const span_array_type& m_spans;
218
+ unsigned m_span_idx;
219
+ };
220
+
221
+ //--------------------------------------------------------------------
222
+ scanline32_p8() :
223
+ m_max_len(0),
224
+ m_last_x(0x7FFFFFF0),
225
+ m_covers(),
226
+ m_cover_ptr(0)
227
+ {
228
+ }
229
+
230
+ //--------------------------------------------------------------------
231
+ void reset(int min_x, int max_x)
232
+ {
233
+ unsigned max_len = max_x - min_x + 3;
234
+ if(max_len > m_covers.size())
235
+ {
236
+ m_covers.resize(max_len);
237
+ }
238
+ m_last_x = 0x7FFFFFF0;
239
+ m_cover_ptr = &m_covers[0];
240
+ m_spans.remove_all();
241
+ }
242
+
243
+ //--------------------------------------------------------------------
244
+ void add_cell(int x, unsigned cover)
245
+ {
246
+ *m_cover_ptr = cover_type(cover);
247
+ if(x == m_last_x+1 && m_spans.size() && m_spans.last().len > 0)
248
+ {
249
+ m_spans.last().len++;
250
+ }
251
+ else
252
+ {
253
+ m_spans.add(span(coord_type(x), 1, m_cover_ptr));
254
+ }
255
+ m_last_x = x;
256
+ m_cover_ptr++;
257
+ }
258
+
259
+ //--------------------------------------------------------------------
260
+ void add_cells(int x, unsigned len, const cover_type* covers)
261
+ {
262
+ memcpy(m_cover_ptr, covers, len * sizeof(cover_type));
263
+ if(x == m_last_x+1 && m_spans.size() && m_spans.last().len > 0)
264
+ {
265
+ m_spans.last().len += coord_type(len);
266
+ }
267
+ else
268
+ {
269
+ m_spans.add(span(coord_type(x), coord_type(len), m_cover_ptr));
270
+ }
271
+ m_cover_ptr += len;
272
+ m_last_x = x + len - 1;
273
+ }
274
+
275
+ //--------------------------------------------------------------------
276
+ void add_span(int x, unsigned len, unsigned cover)
277
+ {
278
+ if(x == m_last_x+1 &&
279
+ m_spans.size() &&
280
+ m_spans.last().len < 0 &&
281
+ cover == *m_spans.last().covers)
282
+ {
283
+ m_spans.last().len -= coord_type(len);
284
+ }
285
+ else
286
+ {
287
+ *m_cover_ptr = cover_type(cover);
288
+ m_spans.add(span(coord_type(x), -coord_type(len), m_cover_ptr++));
289
+ }
290
+ m_last_x = x + len - 1;
291
+ }
292
+
293
+ //--------------------------------------------------------------------
294
+ void finalize(int y)
295
+ {
296
+ m_y = y;
297
+ }
298
+
299
+ //--------------------------------------------------------------------
300
+ void reset_spans()
301
+ {
302
+ m_last_x = 0x7FFFFFF0;
303
+ m_cover_ptr = &m_covers[0];
304
+ m_spans.remove_all();
305
+ }
306
+
307
+ //--------------------------------------------------------------------
308
+ int y() const { return m_y; }
309
+ unsigned num_spans() const { return m_spans.size(); }
310
+ const_iterator begin() const { return const_iterator(m_spans); }
311
+
312
+ private:
313
+ scanline32_p8(const self_type&);
314
+ const self_type& operator = (const self_type&);
315
+
316
+ unsigned m_max_len;
317
+ int m_last_x;
318
+ int m_y;
319
+ pod_array<cover_type> m_covers;
320
+ cover_type* m_cover_ptr;
321
+ span_array_type m_spans;
322
+ };
323
+
324
+
325
+ }
326
+
327
+
328
+ #endif
329
+
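For orientation, a small sketch (not part of the bundled file) of driving scanline_p8 by hand the way a rasterizer would; the coordinates and cover values are arbitrary examples.

#include "agg_scanline_p.h"

void demo_scanline()
{
    agg::scanline_p8 sl;
    sl.reset(0, 100);            // prepare for x in [0, 100]

    sl.add_cell(10, 128);        // one anti-aliased cell at x = 10
    sl.add_span(11, 5, 255);     // solid run of 5 pixels, stored with a negative len
    sl.finalize(42);             // remember the y coordinate of this scanline

    unsigned n = sl.num_spans();
    agg::scanline_p8::const_iterator span = sl.begin();
    while (n--)
    {
        // span->len > 0: per-pixel covers at span->covers
        // span->len < 0: solid span, one cover value at *span->covers
        ++span;
    }
}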
data/bundled_deps/agg/agg/agg_trans_affine.h ADDED
@@ -0,0 +1,518 @@
1
+ //----------------------------------------------------------------------------
2
+ // Anti-Grain Geometry - Version 2.4
3
+ // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4
+ //
5
+ // Permission to copy, use, modify, sell and distribute this software
6
+ // is granted provided this copyright notice appears in all copies.
7
+ // This software is provided "as is" without express or implied
8
+ // warranty, and with no claim as to its suitability for any purpose.
9
+ //
10
+ //----------------------------------------------------------------------------
11
+ // Contact: [email protected]
12
13
+ // http://www.antigrain.com
14
+ //----------------------------------------------------------------------------
15
+ //
16
+ // Affine transformation classes.
17
+ //
18
+ //----------------------------------------------------------------------------
19
+ #ifndef AGG_TRANS_AFFINE_INCLUDED
20
+ #define AGG_TRANS_AFFINE_INCLUDED
21
+
22
+ #include <math.h>
23
+ #include "agg_basics.h"
24
+
25
+ namespace agg
26
+ {
27
+ const double affine_epsilon = 1e-14;
28
+
29
+ //============================================================trans_affine
30
+ //
31
+ // See Implementation agg_trans_affine.cpp
32
+ //
33
+ // Affine transformations are linear transformations in Cartesian coordinates
34
+ // (strictly speaking not only in Cartesian, but for the beginning we will
35
+ // think so). They are rotation, scaling, translation and skewing.
36
+ // After any affine transformation a line segment remains a line segment
37
+ // and it will never become a curve.
38
+ //
39
+ // There will be no math about matrix calculations, since it has been
40
+ // described many times. Ask yourself a very simple question:
41
+ // "why do we need to understand and use some matrix stuff instead of just
42
+ // rotating, scaling and so on". The answers are:
43
+ //
44
+ // 1. Any combination of transformations can be done by only 4 multiplications
45
+ // and 4 additions in floating point.
46
+ // 2. One matrix transformation is equivalent to the number of consecutive
47
+ // discrete transformations, i.e. the matrix "accumulates" all transformations
48
+ // in the order of their settings. Suppose we have 4 transformations:
49
+ // * rotate by 30 degrees,
50
+ // * scale X to 2.0,
51
+ // * scale Y to 1.5,
52
+ // * move to (100, 100).
53
+ // The result will depend on the order of these transformations,
54
+ // and the advantage of the matrix is that the sequence of discrete calls:
55
+ // rotate(30), scaleX(2.0), scaleY(1.5), move(100,100)
56
+ // will have exactly the same result as the following matrix transformations:
57
+ //
58
+ // affine_matrix m;
59
+ // m *= rotate_matrix(30);
60
+ // m *= scaleX_matrix(2.0);
61
+ // m *= scaleY_matrix(1.5);
62
+ // m *= move_matrix(100,100);
63
+ //
64
+ // m.transform_my_point_at_last(x, y);
65
+ //
66
+ // What is the good of it? In real life we will set-up the matrix only once
67
+ // and then transform many points, let alone the convenience to set any
68
+ // combination of transformations.
69
+ //
70
+ // So, how to use it? Very easy - literally as it's shown above. Not quite,
71
+ // let us write a correct example:
72
+ //
73
+ // agg::trans_affine m;
74
+ // m *= agg::trans_affine_rotation(30.0 * 3.1415926 / 180.0);
75
+ // m *= agg::trans_affine_scaling(2.0, 1.5);
76
+ // m *= agg::trans_affine_translation(100.0, 100.0);
77
+ // m.transform(&x, &y);
78
+ //
79
+ // The affine matrix is all you need to perform any linear transformation,
80
+ // but all transformations have origin point (0,0). It means that we need to
81
+ // use 2 translations if we want to rotate something around (100,100):
82
+ //
83
+ // m *= agg::trans_affine_translation(-100.0, -100.0); // move to (0,0)
84
+ // m *= agg::trans_affine_rotation(30.0 * 3.1415926 / 180.0); // rotate
85
+ // m *= agg::trans_affine_translation(100.0, 100.0); // move back to (100,100)
86
+ //----------------------------------------------------------------------
87
+ struct trans_affine
88
+ {
89
+ double sx, shy, shx, sy, tx, ty;
90
+
91
+ //------------------------------------------ Construction
92
+ // Identity matrix
93
+ trans_affine() :
94
+ sx(1.0), shy(0.0), shx(0.0), sy(1.0), tx(0.0), ty(0.0)
95
+ {}
96
+
97
+ // Custom matrix. Usually used in derived classes
98
+ trans_affine(double v0, double v1, double v2,
99
+ double v3, double v4, double v5) :
100
+ sx(v0), shy(v1), shx(v2), sy(v3), tx(v4), ty(v5)
101
+ {}
102
+
103
+ // Custom matrix from m[6]
104
+ explicit trans_affine(const double* m) :
105
+ sx(m[0]), shy(m[1]), shx(m[2]), sy(m[3]), tx(m[4]), ty(m[5])
106
+ {}
107
+
108
+ // Rectangle to a parallelogram.
109
+ trans_affine(double x1, double y1, double x2, double y2,
110
+ const double* parl)
111
+ {
112
+ rect_to_parl(x1, y1, x2, y2, parl);
113
+ }
114
+
115
+ // Parallelogram to a rectangle.
116
+ trans_affine(const double* parl,
117
+ double x1, double y1, double x2, double y2)
118
+ {
119
+ parl_to_rect(parl, x1, y1, x2, y2);
120
+ }
121
+
122
+ // Arbitrary parallelogram transformation.
123
+ trans_affine(const double* src, const double* dst)
124
+ {
125
+ parl_to_parl(src, dst);
126
+ }
127
+
128
+ //---------------------------------- Parallelogram transformations
129
+ // transform a parallelogram to another one. Src and dst are
130
+ // pointers to arrays of three points (double[6], x1,y1,...) that
131
+ // identify three corners of the parallelograms assuming implicit
132
+ // fourth point. The arguments are arrays of double[6] mapped
133
+ // to x1,y1, x2,y2, x3,y3 where the coordinates are:
134
+ // *-----------------*
135
+ // / (x3,y3)/
136
+ // / /
137
+ // /(x1,y1) (x2,y2)/
138
+ // *-----------------*
139
+ const trans_affine& parl_to_parl(const double* src,
140
+ const double* dst);
141
+
142
+ const trans_affine& rect_to_parl(double x1, double y1,
143
+ double x2, double y2,
144
+ const double* parl);
145
+
146
+ const trans_affine& parl_to_rect(const double* parl,
147
+ double x1, double y1,
148
+ double x2, double y2);
149
+
150
+
151
+ //------------------------------------------ Operations
152
+ // Reset - load an identity matrix
153
+ const trans_affine& reset();
154
+
155
+ // Direct transformations operations
156
+ const trans_affine& translate(double x, double y);
157
+ const trans_affine& rotate(double a);
158
+ const trans_affine& scale(double s);
159
+ const trans_affine& scale(double x, double y);
160
+
161
+ // Multiply matrix to another one
162
+ const trans_affine& multiply(const trans_affine& m);
163
+
164
+ // Multiply "m" to "this" and assign the result to "this"
165
+ const trans_affine& premultiply(const trans_affine& m);
166
+
167
+ // Multiply matrix to inverse of another one
168
+ const trans_affine& multiply_inv(const trans_affine& m);
169
+
170
+ // Multiply inverse of "m" to "this" and assign the result to "this"
171
+ const trans_affine& premultiply_inv(const trans_affine& m);
172
+
173
+ // Invert matrix. Do not try to invert degenerate matrices,
174
+ // there's no check for validity. If you set scale to 0 and
175
+ // then try to invert matrix, expect unpredictable result.
176
+ const trans_affine& invert();
177
+
178
+ // Mirroring around X
179
+ const trans_affine& flip_x();
180
+
181
+ // Mirroring around Y
182
+ const trans_affine& flip_y();
183
+
184
+ //------------------------------------------- Load/Store
185
+ // Store matrix to an array [6] of double
186
+ void store_to(double* m) const
187
+ {
188
+ *m++ = sx; *m++ = shy; *m++ = shx; *m++ = sy; *m++ = tx; *m++ = ty;
189
+ }
190
+
191
+ // Load matrix from an array [6] of double
192
+ const trans_affine& load_from(const double* m)
193
+ {
194
+ sx = *m++; shy = *m++; shx = *m++; sy = *m++; tx = *m++; ty = *m++;
195
+ return *this;
196
+ }
197
+
198
+ //------------------------------------------- Operators
199
+
200
+ // Multiply the matrix by another one
201
+ const trans_affine& operator *= (const trans_affine& m)
202
+ {
203
+ return multiply(m);
204
+ }
205
+
206
+ // Multiply the matrix by inverse of another one
207
+ const trans_affine& operator /= (const trans_affine& m)
208
+ {
209
+ return multiply_inv(m);
210
+ }
211
+
212
+ // Multiply the matrix by another one and return
213
+ // the result in a separate matrix.
214
+ trans_affine operator * (const trans_affine& m) const
215
+ {
216
+ return trans_affine(*this).multiply(m);
217
+ }
218
+
219
+ // Multiply the matrix by inverse of another one
220
+ // and return the result in a separete matrix.
221
+ trans_affine operator / (const trans_affine& m) const
222
+ {
223
+ return trans_affine(*this).multiply_inv(m);
224
+ }
225
+
226
+ // Calculate and return the inverse matrix
227
+ trans_affine operator ~ () const
228
+ {
229
+ trans_affine ret = *this;
230
+ return ret.invert();
231
+ }
232
+
233
+ // Equal operator with default epsilon
234
+ bool operator == (const trans_affine& m) const
235
+ {
236
+ return is_equal(m, affine_epsilon);
237
+ }
238
+
239
+ // Not Equal operator with default epsilon
240
+ bool operator != (const trans_affine& m) const
241
+ {
242
+ return !is_equal(m, affine_epsilon);
243
+ }
244
+
245
+ //-------------------------------------------- Transformations
246
+ // Direct transformation of x and y
247
+ void transform(double* x, double* y) const;
248
+
249
+ // Direct transformation of x and y, 2x2 matrix only, no translation
250
+ void transform_2x2(double* x, double* y) const;
251
+
252
+ // Inverse transformation of x and y. It works slower than the
253
+ // direct transformation. For massive operations it's better to
254
+ // invert() the matrix and then use direct transformations.
255
+ void inverse_transform(double* x, double* y) const;
256
+
257
+ //-------------------------------------------- Auxiliary
258
+ // Calculate the determinant of matrix
259
+ double determinant() const
260
+ {
261
+ return sx * sy - shy * shx;
262
+ }
263
+
264
+ // Calculate the reciprocal of the determinant
265
+ double determinant_reciprocal() const
266
+ {
267
+ return 1.0 / (sx * sy - shy * shx);
268
+ }
269
+
270
+ // Get the average scale (by X and Y).
271
+ // Basically used to calculate the approximation_scale when
272
+ // decomposing curves into line segments.
273
+ double scale() const;
274
+
275
+ // Check to see if the matrix is not degenerate
276
+ bool is_valid(double epsilon = affine_epsilon) const;
277
+
278
+ // Check to see if it's an identity matrix
279
+ bool is_identity(double epsilon = affine_epsilon) const;
280
+
281
+ // Check to see if two matrices are equal
282
+ bool is_equal(const trans_affine& m, double epsilon = affine_epsilon) const;
283
+
284
+ // Determine the major parameters. Use with caution considering
285
+ // possible degenerate cases.
286
+ double rotation() const;
287
+ void translation(double* dx, double* dy) const;
288
+ void scaling(double* x, double* y) const;
289
+ void scaling_abs(double* x, double* y) const;
290
+ };
291
+
292
+ //------------------------------------------------------------------------
293
+ inline void trans_affine::transform(double* x, double* y) const
294
+ {
295
+ double tmp = *x;
296
+ *x = tmp * sx + *y * shx + tx;
297
+ *y = tmp * shy + *y * sy + ty;
298
+ }
299
+
300
+ //------------------------------------------------------------------------
301
+ inline void trans_affine::transform_2x2(double* x, double* y) const
302
+ {
303
+ double tmp = *x;
304
+ *x = tmp * sx + *y * shx;
305
+ *y = tmp * shy + *y * sy;
306
+ }
307
+
308
+ //------------------------------------------------------------------------
309
+ inline void trans_affine::inverse_transform(double* x, double* y) const
310
+ {
311
+ double d = determinant_reciprocal();
312
+ double a = (*x - tx) * d;
313
+ double b = (*y - ty) * d;
314
+ *x = a * sy - b * shx;
315
+ *y = b * sx - a * shy;
316
+ }
317
+
318
+ //------------------------------------------------------------------------
319
+ inline double trans_affine::scale() const
320
+ {
321
+ double x = 0.707106781 * sx + 0.707106781 * shx;
322
+ double y = 0.707106781 * shy + 0.707106781 * sy;
323
+ return sqrt(x*x + y*y);
324
+ }
325
+
326
+ //------------------------------------------------------------------------
327
+ inline const trans_affine& trans_affine::translate(double x, double y)
328
+ {
329
+ tx += x;
330
+ ty += y;
331
+ return *this;
332
+ }
333
+
334
+ //------------------------------------------------------------------------
335
+ inline const trans_affine& trans_affine::rotate(double a)
336
+ {
337
+ double ca = cos(a);
338
+ double sa = sin(a);
339
+ double t0 = sx * ca - shy * sa;
340
+ double t2 = shx * ca - sy * sa;
341
+ double t4 = tx * ca - ty * sa;
342
+ shy = sx * sa + shy * ca;
343
+ sy = shx * sa + sy * ca;
344
+ ty = tx * sa + ty * ca;
345
+ sx = t0;
346
+ shx = t2;
347
+ tx = t4;
348
+ return *this;
349
+ }
350
+
351
+ //------------------------------------------------------------------------
352
+ inline const trans_affine& trans_affine::scale(double x, double y)
353
+ {
354
+ double mm0 = x; // Possible hint for the optimizer
355
+ double mm3 = y;
356
+ sx *= mm0;
357
+ shx *= mm0;
358
+ tx *= mm0;
359
+ shy *= mm3;
360
+ sy *= mm3;
361
+ ty *= mm3;
362
+ return *this;
363
+ }
364
+
365
+ //------------------------------------------------------------------------
366
+ inline const trans_affine& trans_affine::scale(double s)
367
+ {
368
+ double m = s; // Possible hint for the optimizer
369
+ sx *= m;
370
+ shx *= m;
371
+ tx *= m;
372
+ shy *= m;
373
+ sy *= m;
374
+ ty *= m;
375
+ return *this;
376
+ }
377
+
378
+ //------------------------------------------------------------------------
379
+ inline const trans_affine& trans_affine::premultiply(const trans_affine& m)
380
+ {
381
+ trans_affine t = m;
382
+ return *this = t.multiply(*this);
383
+ }
384
+
385
+ //------------------------------------------------------------------------
386
+ inline const trans_affine& trans_affine::multiply_inv(const trans_affine& m)
387
+ {
388
+ trans_affine t = m;
389
+ t.invert();
390
+ return multiply(t);
391
+ }
392
+
393
+ //------------------------------------------------------------------------
394
+ inline const trans_affine& trans_affine::premultiply_inv(const trans_affine& m)
395
+ {
396
+ trans_affine t = m;
397
+ t.invert();
398
+ return *this = t.multiply(*this);
399
+ }
400
+
401
+ //------------------------------------------------------------------------
402
+ inline void trans_affine::scaling_abs(double* x, double* y) const
403
+ {
404
+ // Used to calculate scaling coefficients in image resampling.
405
+ // When there is considerable shear this method gives us much
406
+ // better estimation than just sx, sy.
407
+ *x = sqrt(sx * sx + shx * shx);
408
+ *y = sqrt(shy * shy + sy * sy);
409
+ }
410
+
411
+ //====================================================trans_affine_rotation
412
+ // Rotation matrix. sin() and cos() are calculated twice for the same angle.
413
+ // There's no harm because the performance of sin()/cos() is very good on all
414
+ // modern processors. Besides, this operation is not going to be invoked too
415
+ // often.
416
+ class trans_affine_rotation : public trans_affine
417
+ {
418
+ public:
419
+ trans_affine_rotation(double a) :
420
+ trans_affine(cos(a), sin(a), -sin(a), cos(a), 0.0, 0.0)
421
+ {}
422
+ };
423
+
424
+ //====================================================trans_affine_scaling
425
+ // Scaling matrix. x, y - scale coefficients by X and Y respectively
426
+ class trans_affine_scaling : public trans_affine
427
+ {
428
+ public:
429
+ trans_affine_scaling(double x, double y) :
430
+ trans_affine(x, 0.0, 0.0, y, 0.0, 0.0)
431
+ {}
432
+
433
+ trans_affine_scaling(double s) :
434
+ trans_affine(s, 0.0, 0.0, s, 0.0, 0.0)
435
+ {}
436
+ };
437
+
438
+ //================================================trans_affine_translation
439
+ // Translation matrix
440
+ class trans_affine_translation : public trans_affine
441
+ {
442
+ public:
443
+ trans_affine_translation(double x, double y) :
444
+ trans_affine(1.0, 0.0, 0.0, 1.0, x, y)
445
+ {}
446
+ };
447
+
448
+ //====================================================trans_affine_skewing
449
+ // Skewing (shear) matrix
450
+ class trans_affine_skewing : public trans_affine
451
+ {
452
+ public:
453
+ trans_affine_skewing(double x, double y) :
454
+ trans_affine(1.0, tan(y), tan(x), 1.0, 0.0, 0.0)
455
+ {}
456
+ };
457
+
458
+
459
+ //===============================================trans_affine_line_segment
460
+ // Rotate, Scale and Translate, associating 0...dist with line segment
461
+ // x1,y1,x2,y2
462
+ class trans_affine_line_segment : public trans_affine
463
+ {
464
+ public:
465
+ trans_affine_line_segment(double x1, double y1, double x2, double y2,
466
+ double dist)
467
+ {
468
+ double dx = x2 - x1;
469
+ double dy = y2 - y1;
470
+ if(dist > 0.0)
471
+ {
472
+ multiply(trans_affine_scaling(sqrt(dx * dx + dy * dy) / dist));
473
+ }
474
+ multiply(trans_affine_rotation(atan2(dy, dx)));
475
+ multiply(trans_affine_translation(x1, y1));
476
+ }
477
+ };
478
+
479
+
480
+ //============================================trans_affine_reflection_unit
481
+ // Reflection matrix. Reflect coordinates across the line through
482
+ // the origin containing the unit vector (ux, uy).
483
+ // Contributed by John Horigan
484
+ class trans_affine_reflection_unit : public trans_affine
485
+ {
486
+ public:
487
+ trans_affine_reflection_unit(double ux, double uy) :
488
+ trans_affine(2.0 * ux * ux - 1.0,
489
+ 2.0 * ux * uy,
490
+ 2.0 * ux * uy,
491
+ 2.0 * uy * uy - 1.0,
492
+ 0.0, 0.0)
493
+ {}
494
+ };
495
+
496
+
497
+ //=================================================trans_affine_reflection
498
+ // Reflection matrix. Reflect coordinates across the line through
499
+ // the origin at the angle a or containing the non-unit vector (x, y).
500
+ // Contributed by John Horigan
501
+ class trans_affine_reflection : public trans_affine_reflection_unit
502
+ {
503
+ public:
504
+ trans_affine_reflection(double a) :
505
+ trans_affine_reflection_unit(cos(a), sin(a))
506
+ {}
507
+
508
+
509
+ trans_affine_reflection(double x, double y) :
510
+ trans_affine_reflection_unit(x / sqrt(x * x + y * y), y / sqrt(x * x + y * y))
511
+ {}
512
+ };
513
+
514
+ }
515
+
516
+
517
+ #endif
518
+
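The header's own comments already show the composition idiom; as a compact sketch using the classes declared above, this rotates an arbitrary test point around (100, 100) and maps it back with the inverse.

#include "agg_trans_affine.h"

void demo_affine()
{
    const double a = 30.0 * 3.1415926 / 180.0;              // 30 degrees in radians

    agg::trans_affine m;
    m *= agg::trans_affine_translation(-100.0, -100.0);     // move pivot to origin
    m *= agg::trans_affine_rotation(a);                      // rotate
    m *= agg::trans_affine_translation(100.0, 100.0);        // move pivot back

    double x = 150.0, y = 100.0;                              // assumed test point
    m.transform(&x, &y);                                      // forward mapping

    agg::trans_affine inv = ~m;                               // operator~ inverts a copy
    inv.transform(&x, &y);                                    // back to (150, 100)
}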
data/bundled_deps/agg/agg/copying ADDED
@@ -0,0 +1,65 @@
1
+ The Anti-Grain Geometry Project
2
+ A high quality rendering engine for C++
3
+ http://antigrain.com
4
+
5
+ Anti-Grain Geometry has dual licensing model. The Modified BSD
6
+ License was first added in version v2.4 just for convenience.
7
+ It is a simple, permissive non-copyleft free software license,
8
+ compatible with the GNU GPL. It's well proven and recognizable.
9
+ See http://www.fsf.org/licensing/licenses/index_html#ModifiedBSD
10
+ for details.
11
+
12
+ Note that the Modified BSD license DOES NOT restrict your rights
13
+ if you choose the Anti-Grain Geometry Public License.
14
+
15
+
16
+
17
+
18
+ Anti-Grain Geometry Public License
19
+ ====================================================
20
+
21
+ Anti-Grain Geometry - Version 2.4
22
+ Copyright (C) 2002-2005 Maxim Shemanarev (McSeem)
23
+
24
+ Permission to copy, use, modify, sell and distribute this software
25
+ is granted provided this copyright notice appears in all copies.
26
+ This software is provided "as is" without express or implied
27
+ warranty, and with no claim as to its suitability for any purpose.
28
+
29
+
30
+
31
+
32
+
33
+ Modified BSD License
34
+ ====================================================
35
+ Anti-Grain Geometry - Version 2.4
36
+ Copyright (C) 2002-2005 Maxim Shemanarev (McSeem)
37
+
38
+ Redistribution and use in source and binary forms, with or without
39
+ modification, are permitted provided that the following conditions
40
+ are met:
41
+
42
+ 1. Redistributions of source code must retain the above copyright
43
+ notice, this list of conditions and the following disclaimer.
44
+
45
+ 2. Redistributions in binary form must reproduce the above copyright
46
+ notice, this list of conditions and the following disclaimer in
47
+ the documentation and/or other materials provided with the
48
+ distribution.
49
+
50
+ 3. The name of the author may not be used to endorse or promote
51
+ products derived from this software without specific prior
52
+ written permission.
53
+
54
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
56
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
58
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
59
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
60
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
62
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
63
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64
+ POSSIBILITY OF SUCH DAMAGE.
65
+
data/bundled_deps/ankerl/README.txt ADDED
@@ -0,0 +1,7 @@
1
+ THIS DIRECTORY CONTAINS PIECES OF THE
2
+ ankerl::unordered_dense::{map, set}
3
+ https://github.com/martinus/unordered_dense
4
+ unordered_dense 3.1.1 10782bfc651c2bb75b11bf90491f50da122e5432
5
+ SOURCE DISTRIBUTION.
6
+
7
+ THIS IS NOT THE COMPLETE unordered_dense DISTRIBUTION. ONLY FILES NEEDED FOR COMPILING PRUSASLICER WERE PUT INTO THE PRUSASLICER SOURCE DISTRIBUTION.
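As a quick orientation for readers of the bundled header, the containers follow the std::unordered_map / std::unordered_set interface. A minimal usage sketch follows; the include path, keys, and values are assumptions for illustration.

// Include path depends on the project's include directories (assumed here).
#include <ankerl/unordered_dense.h>
#include <cstdio>
#include <string>

int main()
{
    ankerl::unordered_dense::map<std::string, int> counts;
    counts["triangles"] = 12;          // insert or overwrite
    counts["triangles"] += 3;          // update in place

    auto it = counts.find("triangles");
    if (it != counts.end())
        std::printf("%s -> %d\n", it->first.c_str(), it->second);
    return 0;
}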
data/bundled_deps/ankerl/ankerl/unordered_dense.h ADDED
@@ -0,0 +1,1584 @@
1
+ ///////////////////////// ankerl::unordered_dense::{map, set} /////////////////////////
2
+
3
+ // A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion.
4
+ // Version 3.1.1
5
+ // https://github.com/martinus/unordered_dense
6
+ //
7
+ // Licensed under the MIT License <http://opensource.org/licenses/MIT>.
8
+ // SPDX-License-Identifier: MIT
9
+ // Copyright (c) 2022-2023 Martin Leitner-Ankerl <[email protected]>
10
+ //
11
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
12
+ // of this software and associated documentation files (the "Software"), to deal
13
+ // in the Software without restriction, including without limitation the rights
14
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15
+ // copies of the Software, and to permit persons to whom the Software is
16
+ // furnished to do so, subject to the following conditions:
17
+ //
18
+ // The above copyright notice and this permission notice shall be included in all
19
+ // copies or substantial portions of the Software.
20
+ //
21
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27
+ // SOFTWARE.
28
+
29
+ #ifndef ANKERL_UNORDERED_DENSE_H
30
+ #define ANKERL_UNORDERED_DENSE_H
31
+
32
+ // see https://semver.org/spec/v2.0.0.html
33
+ #define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 3 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes
34
+ #define ANKERL_UNORDERED_DENSE_VERSION_MINOR 1 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality
35
+ #define ANKERL_UNORDERED_DENSE_VERSION_PATCH 1 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes
36
+
37
+ // API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/
38
+ #define ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch
39
+ #define ANKERL_UNORDERED_DENSE_VERSION_CONCAT(major, minor, patch) ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch)
40
+ #define ANKERL_UNORDERED_DENSE_NAMESPACE \
41
+ ANKERL_UNORDERED_DENSE_VERSION_CONCAT( \
42
+ ANKERL_UNORDERED_DENSE_VERSION_MAJOR, ANKERL_UNORDERED_DENSE_VERSION_MINOR, ANKERL_UNORDERED_DENSE_VERSION_PATCH)
43
+
44
+ #if defined(_MSVC_LANG)
45
+ # define ANKERL_UNORDERED_DENSE_CPP_VERSION _MSVC_LANG
46
+ #else
47
+ # define ANKERL_UNORDERED_DENSE_CPP_VERSION __cplusplus
48
+ #endif
49
+
50
+ #if defined(__GNUC__)
51
+ // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
52
+ # define ANKERL_UNORDERED_DENSE_PACK(decl) decl __attribute__((__packed__))
53
+ #elif defined(_MSC_VER)
54
+ // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
55
+ # define ANKERL_UNORDERED_DENSE_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop))
56
+ #endif
57
+
58
+ // exceptions
59
+ #if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)
60
+ # define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 1
61
+ #else
62
+ # define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 0
63
+ #endif
64
+ #ifdef _MSC_VER
65
+ # define ANKERL_UNORDERED_DENSE_NOINLINE __declspec(noinline)
66
+ #else
67
+ # define ANKERL_UNORDERED_DENSE_NOINLINE __attribute__((noinline))
68
+ #endif
69
+
70
+ #if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L
71
+ # error ankerl::unordered_dense requires C++17 or higher
72
+ #else
73
+ # include <array> // for array
74
+ # include <cstdint> // for uint64_t, uint32_t, uint8_t, UINT64_C
75
+ # include <cstring> // for size_t, memcpy, memset
76
+ # include <functional> // for equal_to, hash
77
+ # include <initializer_list> // for initializer_list
78
+ # include <iterator> // for pair, distance
79
+ # include <limits> // for numeric_limits
80
+ # include <memory> // for allocator, allocator_traits, shared_ptr
81
+ # include <stdexcept> // for out_of_range
82
+ # include <string> // for basic_string
83
+ # include <string_view> // for basic_string_view, hash
84
+ # include <tuple> // for forward_as_tuple
85
+ # include <type_traits> // for enable_if_t, declval, conditional_t, ena...
86
+ # include <utility> // for forward, exchange, pair, as_const, piece...
87
+ # include <vector> // for vector
88
+ # if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() == 0
89
+ # include <cstdlib> // for abort
90
+ # endif
91
+
92
+ # define ANKERL_UNORDERED_DENSE_PMR 0 // NOLINT(cppcoreguidelines-macro-usage)
93
+ # if defined(__has_include)
94
+ # if __has_include(<memory_resource>)
95
+ # undef ANKERL_UNORDERED_DENSE_PMR
96
+ # define ANKERL_UNORDERED_DENSE_PMR 1 // NOLINT(cppcoreguidelines-macro-usage)
97
+ # define ANKERL_UNORDERED_DENSE_PMR_ALLOCATOR \
98
+ std::pmr::polymorphic_allocator // NOLINT(cppcoreguidelines-macro-usage)
99
+ # include <memory_resource> // for polymorphic_allocator
100
+ # elif __has_include(<experimental/memory_resource>)
101
+ # undef ANKERL_UNORDERED_DENSE_PMR
102
+ # define ANKERL_UNORDERED_DENSE_PMR 1 // NOLINT(cppcoreguidelines-macro-usage)
103
+ # define ANKERL_UNORDERED_DENSE_PMR_ALLOCATOR \
104
+ std::experimental::pmr::polymorphic_allocator // NOLINT(cppcoreguidelines-macro-usage)
105
+ # include <experimental/memory_resource> // for polymorphic_allocator
106
+ # endif
107
+ # endif
108
+
109
+ # if defined(_MSC_VER) && defined(_M_X64)
110
+ # include <intrin.h>
111
+ # pragma intrinsic(_umul128)
112
+ # endif
113
+
114
+ # if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
115
+ # define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1) // NOLINT(cppcoreguidelines-macro-usage)
116
+ # define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) // NOLINT(cppcoreguidelines-macro-usage)
117
+ # else
118
+ # define ANKERL_UNORDERED_DENSE_LIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
119
+ # define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
120
+ # endif
121
+
122
+ namespace ankerl::unordered_dense {
123
+ inline namespace ANKERL_UNORDERED_DENSE_NAMESPACE {
124
+
125
+ namespace detail {
126
+
127
+ # if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS()
128
+
129
+ // make sure this is not inlined as it is slow and dramatically enlarges code, thus making other
130
+ // inlinings more difficult. Throws are also generally the slow path.
131
+ [[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_key_not_found() {
132
+ throw std::out_of_range("ankerl::unordered_dense::map::at(): key not found");
133
+ }
134
+ [[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_bucket_overflow() {
135
+ throw std::overflow_error("ankerl::unordered_dense: reached max bucket size, cannot increase size");
136
+ }
137
+ [[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_too_many_elements() {
138
+ throw std::out_of_range("ankerl::unordered_dense::map::replace(): too many elements");
139
+ }
140
+
141
+ # else
142
+
143
+ [[noreturn]] inline void on_error_key_not_found() {
144
+ abort();
145
+ }
146
+ [[noreturn]] inline void on_error_bucket_overflow() {
147
+ abort();
148
+ }
149
+ [[noreturn]] inline void on_error_too_many_elements() {
150
+ abort();
151
+ }
152
+
153
+ # endif
154
+
155
+ } // namespace detail
156
+
157
+ // hash ///////////////////////////////////////////////////////////////////////
158
+
159
+ // This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash
160
+ // No big-endian support (because different values on different machines don't matter),
161
+ // hardcodes the seed and the secret, reformats the code, and applies clang-tidy fixes.
162
+ namespace detail::wyhash {
163
+
164
+ static inline void mum(uint64_t* a, uint64_t* b) {
165
+ # if defined(__SIZEOF_INT128__)
166
+ __uint128_t r = *a;
167
+ r *= *b;
168
+ *a = static_cast<uint64_t>(r);
169
+ *b = static_cast<uint64_t>(r >> 64U);
170
+ # elif defined(_MSC_VER) && defined(_M_X64)
171
+ *a = _umul128(*a, *b, b);
172
+ # else
173
+ uint64_t ha = *a >> 32U;
174
+ uint64_t hb = *b >> 32U;
175
+ uint64_t la = static_cast<uint32_t>(*a);
176
+ uint64_t lb = static_cast<uint32_t>(*b);
177
+ uint64_t hi{};
178
+ uint64_t lo{};
179
+ uint64_t rh = ha * hb;
180
+ uint64_t rm0 = ha * lb;
181
+ uint64_t rm1 = hb * la;
182
+ uint64_t rl = la * lb;
183
+ uint64_t t = rl + (rm0 << 32U);
184
+ auto c = static_cast<uint64_t>(t < rl);
185
+ lo = t + (rm1 << 32U);
186
+ c += static_cast<uint64_t>(lo < t);
187
+ hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c;
188
+ *a = lo;
189
+ *b = hi;
190
+ # endif
191
+ }
192
+
193
+ // multiply and xor mix function, aka MUM
194
+ [[nodiscard]] static inline auto mix(uint64_t a, uint64_t b) -> uint64_t {
195
+ mum(&a, &b);
196
+ return a ^ b;
197
+ }
198
+
199
+ // read functions. WARNING: we don't care about endianness, so results are different on big endian!
200
+ [[nodiscard]] static inline auto r8(const uint8_t* p) -> uint64_t {
201
+ uint64_t v{};
202
+ std::memcpy(&v, p, 8U);
203
+ return v;
204
+ }
205
+
206
+ [[nodiscard]] static inline auto r4(const uint8_t* p) -> uint64_t {
207
+ uint32_t v{};
208
+ std::memcpy(&v, p, 4);
209
+ return v;
210
+ }
211
+
212
+ // reads 1, 2, or 3 bytes
213
+ [[nodiscard]] static inline auto r3(const uint8_t* p, size_t k) -> uint64_t {
214
+ return (static_cast<uint64_t>(p[0]) << 16U) | (static_cast<uint64_t>(p[k >> 1U]) << 8U) | p[k - 1];
215
+ }
216
+
217
+ [[maybe_unused]] [[nodiscard]] static inline auto hash(void const* key, size_t len) -> uint64_t {
218
+ static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f),
219
+ UINT64_C(0xe7037ed1a0b428db),
220
+ UINT64_C(0x8ebc6af09c88c6e3),
221
+ UINT64_C(0x589965cc75374cc3)};
222
+
223
+ auto const* p = static_cast<uint8_t const*>(key);
224
+ uint64_t seed = secret[0];
225
+ uint64_t a{};
226
+ uint64_t b{};
227
+ if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) {
228
+ if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4)) {
229
+ a = (r4(p) << 32U) | r4(p + ((len >> 3U) << 2U));
230
+ b = (r4(p + len - 4) << 32U) | r4(p + len - 4 - ((len >> 3U) << 2U));
231
+ } else if (ANKERL_UNORDERED_DENSE_LIKELY(len > 0)) {
232
+ a = r3(p, len);
233
+ b = 0;
234
+ } else {
235
+ a = 0;
236
+ b = 0;
237
+ }
238
+ } else {
239
+ size_t i = len;
240
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) {
241
+ uint64_t see1 = seed;
242
+ uint64_t see2 = seed;
243
+ do {
244
+ seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
245
+ see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1);
246
+ see2 = mix(r8(p + 32) ^ secret[3], r8(p + 40) ^ see2);
247
+ p += 48;
248
+ i -= 48;
249
+ } while (ANKERL_UNORDERED_DENSE_LIKELY(i > 48));
250
+ seed ^= see1 ^ see2;
251
+ }
252
+ while (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 16)) {
253
+ seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
254
+ i -= 16;
255
+ p += 16;
256
+ }
257
+ a = r8(p + i - 16);
258
+ b = r8(p + i - 8);
259
+ }
260
+
261
+ return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed));
262
+ }
263
+
264
+ [[nodiscard]] static inline auto hash(uint64_t x) -> uint64_t {
265
+ return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15));
266
+ }
267
+
268
+ } // namespace detail::wyhash
269
+
270
+ template <typename T, typename Enable = void>
271
+ struct hash {
272
+ auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
273
+ -> uint64_t {
274
+ return std::hash<T>{}(obj);
275
+ }
276
+ };
277
+
278
+ template <typename CharT>
279
+ struct hash<std::basic_string<CharT>> {
280
+ using is_avalanching = void;
281
+ auto operator()(std::basic_string<CharT> const& str) const noexcept -> uint64_t {
282
+ return detail::wyhash::hash(str.data(), sizeof(CharT) * str.size());
283
+ }
284
+ };
285
+
286
+ template <typename CharT>
287
+ struct hash<std::basic_string_view<CharT>> {
288
+ using is_avalanching = void;
289
+ auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> uint64_t {
290
+ return detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size());
291
+ }
292
+ };
293
+
294
+ template <class T>
295
+ struct hash<T*> {
296
+ using is_avalanching = void;
297
+ auto operator()(T* ptr) const noexcept -> uint64_t {
298
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
299
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr));
300
+ }
301
+ };
302
+
303
+ template <class T>
304
+ struct hash<std::unique_ptr<T>> {
305
+ using is_avalanching = void;
306
+ auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> uint64_t {
307
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
308
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
309
+ }
310
+ };
311
+
312
+ template <class T>
313
+ struct hash<std::shared_ptr<T>> {
314
+ using is_avalanching = void;
315
+ auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> uint64_t {
316
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
317
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
318
+ }
319
+ };
320
+
321
+ template <typename Enum>
322
+ struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> {
323
+ using is_avalanching = void;
324
+ auto operator()(Enum e) const noexcept -> uint64_t {
325
+ using underlying = typename std::underlying_type_t<Enum>;
326
+ return detail::wyhash::hash(static_cast<underlying>(e));
327
+ }
328
+ };
329
+
330
+ // NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
331
+ # define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T) \
332
+ template <> \
333
+ struct hash<T> { \
334
+ using is_avalanching = void; \
335
+ auto operator()(T const& obj) const noexcept -> uint64_t { \
336
+ return detail::wyhash::hash(static_cast<uint64_t>(obj)); \
337
+ } \
338
+ }
339
+
340
+ # if defined(__GNUC__) && !defined(__clang__)
341
+ # pragma GCC diagnostic push
342
+ # pragma GCC diagnostic ignored "-Wuseless-cast"
343
+ # endif
344
+ // see https://en.cppreference.com/w/cpp/utility/hash
345
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool);
346
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char);
347
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char);
348
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char);
349
+ # if ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L
350
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t);
351
+ # endif
352
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t);
353
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t);
354
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t);
355
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short);
356
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short);
357
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int);
358
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int);
359
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long);
360
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long);
361
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long);
362
+ ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long);
363
+
364
+ # if defined(__GNUC__) && !defined(__clang__)
365
+ # pragma GCC diagnostic pop
366
+ # endif
367
+
368
+ // bucket_type //////////////////////////////////////////////////////////
369
+
370
+ namespace bucket_type {
371
+
372
+ struct standard {
373
+ static constexpr uint32_t dist_inc = 1U << 8U; // skip 1 byte fingerprint
374
+ static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
375
+
376
+ uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
377
+ uint32_t m_value_idx; // index into the m_values vector.
378
+ };
379
+
380
+ ANKERL_UNORDERED_DENSE_PACK(struct big {
381
+ static constexpr uint32_t dist_inc = 1U << 8U; // skip 1 byte fingerprint
382
+ static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
383
+
384
+ uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
385
+ size_t m_value_idx; // index into the m_values vector.
386
+ });
387
+
388
+ } // namespace bucket_type
389
+
390
+ namespace detail {
391
+
392
+ struct nonesuch {};
393
+
394
+ template <class Default, class AlwaysVoid, template <class...> class Op, class... Args>
395
+ struct detector {
396
+ using value_t = std::false_type;
397
+ using type = Default;
398
+ };
399
+
400
+ template <class Default, template <class...> class Op, class... Args>
401
+ struct detector<Default, std::void_t<Op<Args...>>, Op, Args...> {
402
+ using value_t = std::true_type;
403
+ using type = Op<Args...>;
404
+ };
405
+
406
+ template <template <class...> class Op, class... Args>
407
+ using is_detected = typename detail::detector<detail::nonesuch, void, Op, Args...>::value_t;
408
+
409
+ template <template <class...> class Op, class... Args>
410
+ constexpr bool is_detected_v = is_detected<Op, Args...>::value;
411
+
412
+ template <typename T>
413
+ using detect_avalanching = typename T::is_avalanching;
414
+
415
+ template <typename T>
416
+ using detect_is_transparent = typename T::is_transparent;
417
+
418
+ template <typename T>
419
+ using detect_iterator = typename T::iterator;
420
+
421
+ template <typename T>
422
+ using detect_reserve = decltype(std::declval<T&>().reserve(size_t{}));
423
+
424
+ // enable_if helpers
425
+
426
+ template <typename Mapped>
427
+ constexpr bool is_map_v = !std::is_void_v<Mapped>;
428
+
429
+ // clang-format off
430
+ template <typename Hash, typename KeyEqual>
431
+ constexpr bool is_transparent_v = is_detected_v<detect_is_transparent, Hash>&& is_detected_v<detect_is_transparent, KeyEqual>;
432
+ // clang-format on
433
+
434
+ template <typename From, typename To1, typename To2>
435
+ constexpr bool is_neither_convertible_v = !std::is_convertible_v<From, To1> && !std::is_convertible_v<From, To2>;
436
+
437
+ template <typename T>
438
+ constexpr bool has_reserve = is_detected_v<detect_reserve, T>;
439
+
440
+ // base type for map has mapped_type
441
+ template <class T>
442
+ struct base_table_type_map {
443
+ using mapped_type = T;
444
+ };
445
+
446
+ // base type for set doesn't have mapped_type
447
+ struct base_table_type_set {};
448
+
449
+ // This is it, the table. Doubles as map and set, and uses `void` for T when its used as a set.
450
+ template <class Key,
451
+ class T, // when void, treat it as a set.
452
+ class Hash,
453
+ class KeyEqual,
454
+ class AllocatorOrContainer,
455
+ class Bucket>
456
+ class table : public std::conditional_t<is_map_v<T>, base_table_type_map<T>, base_table_type_set> {
457
+ public:
458
+ using value_container_type = std::conditional_t<
459
+ is_detected_v<detect_iterator, AllocatorOrContainer>,
460
+ AllocatorOrContainer,
461
+ typename std::vector<typename std::conditional_t<is_map_v<T>, std::pair<Key, T>, Key>, AllocatorOrContainer>>;
462
+
463
+ private:
464
+ using bucket_alloc =
465
+ typename std::allocator_traits<typename value_container_type::allocator_type>::template rebind_alloc<Bucket>;
466
+ using bucket_alloc_traits = std::allocator_traits<bucket_alloc>;
467
+
468
+ static constexpr uint8_t initial_shifts = 64 - 3; // 2^(64-m_shift) number of buckets
469
+ static constexpr float default_max_load_factor = 0.8F;
470
+
471
+ public:
472
+ using key_type = Key;
473
+ using value_type = typename value_container_type::value_type;
474
+ using size_type = typename value_container_type::size_type;
475
+ using difference_type = typename value_container_type::difference_type;
476
+ using hasher = Hash;
477
+ using key_equal = KeyEqual;
478
+ using allocator_type = typename value_container_type::allocator_type;
479
+ using reference = typename value_container_type::reference;
480
+ using const_reference = typename value_container_type::const_reference;
481
+ using pointer = typename value_container_type::pointer;
482
+ using const_pointer = typename value_container_type::const_pointer;
483
+ using const_iterator = typename value_container_type::const_iterator;
484
+ using iterator = std::conditional_t<is_map_v<T>, typename value_container_type::iterator, const_iterator>;
485
+ using bucket_type = Bucket;
486
+
487
+ private:
488
+ using value_idx_type = decltype(Bucket::m_value_idx);
489
+ using dist_and_fingerprint_type = decltype(Bucket::m_dist_and_fingerprint);
490
+
491
+ static_assert(std::is_trivially_destructible_v<Bucket>, "assert there's no need to call destructor / std::destroy");
492
+ static_assert(std::is_trivially_copyable_v<Bucket>, "assert we can just memset / memcpy");
493
+
494
+ value_container_type m_values{}; // Contains all the key-value pairs in one densely stored container. No holes.
495
+ typename std::allocator_traits<bucket_alloc>::pointer m_buckets{};
496
+ size_t m_num_buckets = 0;
497
+ size_t m_max_bucket_capacity = 0;
498
+ float m_max_load_factor = default_max_load_factor;
499
+ Hash m_hash{};
500
+ KeyEqual m_equal{};
501
+ uint8_t m_shifts = initial_shifts;
502
+
503
+ [[nodiscard]] auto next(value_idx_type bucket_idx) const -> value_idx_type {
504
+ return ANKERL_UNORDERED_DENSE_UNLIKELY(bucket_idx + 1U == m_num_buckets)
505
+ ? 0
506
+ : static_cast<value_idx_type>(bucket_idx + 1U);
507
+ }
508
+
509
+ // Helper to access bucket through pointer types
510
+ [[nodiscard]] static constexpr auto at(typename std::allocator_traits<bucket_alloc>::pointer bucket_ptr, size_t offset)
511
+ -> Bucket& {
512
+ return *(bucket_ptr + static_cast<typename std::allocator_traits<bucket_alloc>::difference_type>(offset));
513
+ }
514
+
515
+ // use the dist_inc and dist_dec functions so that uint16_t types work without warning
516
+ [[nodiscard]] static constexpr auto dist_inc(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
517
+ return static_cast<dist_and_fingerprint_type>(x + Bucket::dist_inc);
518
+ }
519
+
520
+ [[nodiscard]] static constexpr auto dist_dec(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
521
+ return static_cast<dist_and_fingerprint_type>(x - Bucket::dist_inc);
522
+ }
523
+
524
+ // The goal of mixed_hash is to always produce a high quality 64bit hash.
525
+ template <typename K>
526
+ [[nodiscard]] constexpr auto mixed_hash(K const& key) const -> uint64_t {
527
+ if constexpr (is_detected_v<detect_avalanching, Hash>) {
528
+ // we know that the hash is good because is_avalanching.
529
+ if constexpr (sizeof(decltype(m_hash(key))) < sizeof(uint64_t)) {
530
+ // 32bit hash and is_avalanching => multiply with a constant to avalanche bits upwards
531
+ return m_hash(key) * UINT64_C(0x9ddfea08eb382d69);
532
+ } else {
533
+ // 64bit and is_avalanching => only use the hash itself.
534
+ return m_hash(key);
535
+ }
536
+ } else {
537
+ // not is_avalanching => apply wyhash
538
+ return wyhash::hash(m_hash(key));
539
+ }
540
+ }
541
+
542
+ [[nodiscard]] constexpr auto dist_and_fingerprint_from_hash(uint64_t hash) const -> dist_and_fingerprint_type {
543
+ return Bucket::dist_inc | (static_cast<dist_and_fingerprint_type>(hash) & Bucket::fingerprint_mask);
544
+ }
545
+
546
+ [[nodiscard]] constexpr auto bucket_idx_from_hash(uint64_t hash) const -> value_idx_type {
547
+ return static_cast<value_idx_type>(hash >> m_shifts);
548
+ }
549
+
550
+ [[nodiscard]] static constexpr auto get_key(value_type const& vt) -> key_type const& {
551
+ if constexpr (is_map_v<T>) {
552
+ return vt.first;
553
+ } else {
554
+ return vt;
555
+ }
556
+ }
557
+
558
+ template <typename K>
559
+ [[nodiscard]] auto next_while_less(K const& key) const -> Bucket {
560
+ auto hash = mixed_hash(key);
561
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
562
+ auto bucket_idx = bucket_idx_from_hash(hash);
563
+
564
+ while (dist_and_fingerprint < at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
565
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
566
+ bucket_idx = next(bucket_idx);
567
+ }
568
+ return {dist_and_fingerprint, bucket_idx};
569
+ }
570
+
571
+ void place_and_shift_up(Bucket bucket, value_idx_type place) {
572
+ while (0 != at(m_buckets, place).m_dist_and_fingerprint) {
573
+ bucket = std::exchange(at(m_buckets, place), bucket);
574
+ bucket.m_dist_and_fingerprint = dist_inc(bucket.m_dist_and_fingerprint);
575
+ place = next(place);
576
+ }
577
+ at(m_buckets, place) = bucket;
578
+ }
579
+
580
+ [[nodiscard]] static constexpr auto calc_num_buckets(uint8_t shifts) -> size_t {
581
+ return (std::min)(max_bucket_count(), size_t{1} << (64U - shifts));
582
+ }
583
+
584
+ [[nodiscard]] constexpr auto calc_shifts_for_size(size_t s) const -> uint8_t {
585
+ auto shifts = initial_shifts;
586
+ while (shifts > 0 && static_cast<size_t>(static_cast<float>(calc_num_buckets(shifts)) * max_load_factor()) < s) {
587
+ --shifts;
588
+ }
589
+ return shifts;
590
+ }
591
+
592
+ // assumes m_values has data, m_buckets=m_buckets_end=nullptr, m_shifts is INITIAL_SHIFTS
593
+ void copy_buckets(table const& other) {
594
+ if (!empty()) {
595
+ m_shifts = other.m_shifts;
596
+ allocate_buckets_from_shift();
597
+ std::memcpy(m_buckets, other.m_buckets, sizeof(Bucket) * bucket_count());
598
+ }
599
+ }
600
+
601
+ /**
602
+ * True when no element can be added any more without increasing the size
603
+ */
604
+ [[nodiscard]] auto is_full() const -> bool {
605
+ return size() >= m_max_bucket_capacity;
606
+ }
607
+
608
+ void deallocate_buckets() {
609
+ auto ba = bucket_alloc(m_values.get_allocator());
610
+ if (nullptr != m_buckets) {
611
+ bucket_alloc_traits::deallocate(ba, m_buckets, bucket_count());
612
+ }
613
+ m_buckets = nullptr;
614
+ m_num_buckets = 0;
615
+ m_max_bucket_capacity = 0;
616
+ }
617
+
618
+ void allocate_buckets_from_shift() {
619
+ auto ba = bucket_alloc(m_values.get_allocator());
620
+ m_num_buckets = calc_num_buckets(m_shifts);
621
+ m_buckets = bucket_alloc_traits::allocate(ba, m_num_buckets);
622
+ if (m_num_buckets == max_bucket_count()) {
623
+ // reached the maximum, make sure we can use each bucket
624
+ m_max_bucket_capacity = max_bucket_count();
625
+ } else {
626
+ m_max_bucket_capacity = static_cast<value_idx_type>(static_cast<float>(m_num_buckets) * max_load_factor());
627
+ }
628
+ }
629
+
630
+ void clear_buckets() {
631
+ if (m_buckets != nullptr) {
632
+ std::memset(&*m_buckets, 0, sizeof(Bucket) * bucket_count());
633
+ }
634
+ }
635
+
636
+ void clear_and_fill_buckets_from_values() {
637
+ clear_buckets();
638
+ for (value_idx_type value_idx = 0, end_idx = static_cast<value_idx_type>(m_values.size()); value_idx < end_idx;
639
+ ++value_idx) {
640
+ auto const& key = get_key(m_values[value_idx]);
641
+ auto [dist_and_fingerprint, bucket] = next_while_less(key);
642
+
643
+ // we know for certain that key has not yet been inserted, so no need to check it.
644
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket);
645
+ }
646
+ }
647
+
648
+ void increase_size() {
649
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(m_max_bucket_capacity == max_bucket_count())) {
650
+ on_error_bucket_overflow();
651
+ }
652
+ --m_shifts;
653
+ deallocate_buckets();
654
+ allocate_buckets_from_shift();
655
+ clear_and_fill_buckets_from_values();
656
+ }
657
+
658
+ void do_erase(value_idx_type bucket_idx) {
659
+ auto const value_idx_to_remove = at(m_buckets, bucket_idx).m_value_idx;
660
+
661
+ // shift down until either empty or an element with correct spot is found
662
+ auto next_bucket_idx = next(bucket_idx);
663
+ while (at(m_buckets, next_bucket_idx).m_dist_and_fingerprint >= Bucket::dist_inc * 2) {
664
+ at(m_buckets, bucket_idx) = {dist_dec(at(m_buckets, next_bucket_idx).m_dist_and_fingerprint),
665
+ at(m_buckets, next_bucket_idx).m_value_idx};
666
+ bucket_idx = std::exchange(next_bucket_idx, next(next_bucket_idx));
667
+ }
668
+ at(m_buckets, bucket_idx) = {};
669
+
670
+ // update m_values
671
+ if (value_idx_to_remove != m_values.size() - 1) {
672
+ // no luck, we'll have to replace the value with the last one and update the index accordingly
673
+ auto& val = m_values[value_idx_to_remove];
674
+ val = std::move(m_values.back());
675
+
676
+ // update the values_idx of the moved entry. No need to play the info game, just look until we find the values_idx
677
+ auto mh = mixed_hash(get_key(val));
678
+ bucket_idx = bucket_idx_from_hash(mh);
679
+
680
+ auto const values_idx_back = static_cast<value_idx_type>(m_values.size() - 1);
681
+ while (values_idx_back != at(m_buckets, bucket_idx).m_value_idx) {
682
+ bucket_idx = next(bucket_idx);
683
+ }
684
+ at(m_buckets, bucket_idx).m_value_idx = value_idx_to_remove;
685
+ }
686
+ m_values.pop_back();
687
+ }
688
+
689
+ template <typename K>
690
+ auto do_erase_key(K&& key) -> size_t {
691
+ if (empty()) {
692
+ return 0;
693
+ }
694
+
695
+ auto [dist_and_fingerprint, bucket_idx] = next_while_less(key);
696
+
697
+ while (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
698
+ !m_equal(key, get_key(m_values[at(m_buckets, bucket_idx).m_value_idx]))) {
699
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
700
+ bucket_idx = next(bucket_idx);
701
+ }
702
+
703
+ if (dist_and_fingerprint != at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
704
+ return 0;
705
+ }
706
+ do_erase(bucket_idx);
707
+ return 1;
708
+ }
709
+
710
+ template <class K, class M>
711
+ auto do_insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> {
712
+ auto it_isinserted = try_emplace(std::forward<K>(key), std::forward<M>(mapped));
713
+ if (!it_isinserted.second) {
714
+ it_isinserted.first->second = std::forward<M>(mapped);
715
+ }
716
+ return it_isinserted;
717
+ }
718
+
719
+ template <typename K, typename... Args>
720
+ auto do_place_element(dist_and_fingerprint_type dist_and_fingerprint, value_idx_type bucket_idx, K&& key, Args&&... args)
721
+ -> std::pair<iterator, bool> {
722
+
723
+ // emplace the new value. If that throws an exception, no harm done; index is still in a valid state
724
+ m_values.emplace_back(std::piecewise_construct,
725
+ std::forward_as_tuple(std::forward<K>(key)),
726
+ std::forward_as_tuple(std::forward<Args>(args)...));
727
+
728
+ // place element and shift up until we find an empty spot
729
+ auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
730
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
731
+ return {begin() + static_cast<difference_type>(value_idx), true};
732
+ }
733
+
734
+ template <typename K, typename... Args>
735
+ auto do_try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> {
736
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(is_full())) {
737
+ increase_size();
738
+ }
739
+
740
+ auto hash = mixed_hash(key);
741
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
742
+ auto bucket_idx = bucket_idx_from_hash(hash);
743
+
744
+ while (true) {
745
+ auto* bucket = &at(m_buckets, bucket_idx);
746
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint) {
747
+ if (m_equal(key, m_values[bucket->m_value_idx].first)) {
748
+ return {begin() + static_cast<difference_type>(bucket->m_value_idx), false};
749
+ }
750
+ } else if (dist_and_fingerprint > bucket->m_dist_and_fingerprint) {
751
+ return do_place_element(dist_and_fingerprint, bucket_idx, std::forward<K>(key), std::forward<Args>(args)...);
752
+ }
753
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
754
+ bucket_idx = next(bucket_idx);
755
+ }
756
+ }
757
+
758
+ template <typename K>
759
+ auto do_find(K const& key) -> iterator {
760
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(empty())) {
761
+ return end();
762
+ }
763
+
764
+ auto mh = mixed_hash(key);
765
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(mh);
766
+ auto bucket_idx = bucket_idx_from_hash(mh);
767
+ auto* bucket = &at(m_buckets, bucket_idx);
768
+
769
+ // unrolled loop. *Always* check a few directly, then enter the loop. This is faster.
770
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
771
+ return begin() + static_cast<difference_type>(bucket->m_value_idx);
772
+ }
773
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
774
+ bucket_idx = next(bucket_idx);
775
+ bucket = &at(m_buckets, bucket_idx);
776
+
777
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
778
+ return begin() + static_cast<difference_type>(bucket->m_value_idx);
779
+ }
780
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
781
+ bucket_idx = next(bucket_idx);
782
+ bucket = &at(m_buckets, bucket_idx);
783
+
784
+ while (true) {
785
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint) {
786
+ if (m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
787
+ return begin() + static_cast<difference_type>(bucket->m_value_idx);
788
+ }
789
+ } else if (dist_and_fingerprint > bucket->m_dist_and_fingerprint) {
790
+ return end();
791
+ }
792
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
793
+ bucket_idx = next(bucket_idx);
794
+ bucket = &at(m_buckets, bucket_idx);
795
+ }
796
+ }
797
+
798
+ template <typename K>
799
+ auto do_find(K const& key) const -> const_iterator {
800
+ return const_cast<table*>(this)->do_find(key); // NOLINT(cppcoreguidelines-pro-type-const-cast)
801
+ }
802
+
803
+ template <typename K, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
804
+ auto do_at(K const& key) -> Q& {
805
+ if (auto it = find(key); ANKERL_UNORDERED_DENSE_LIKELY(end() != it)) {
806
+ return it->second;
807
+ }
808
+ on_error_key_not_found();
809
+ }
810
+
811
+ template <typename K, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
812
+ auto do_at(K const& key) const -> Q const& {
813
+ return const_cast<table*>(this)->at(key); // NOLINT(cppcoreguidelines-pro-type-const-cast)
814
+ }
815
+
816
+ public:
817
+ table()
818
+ : table(0) {}
819
+
820
+ explicit table(size_t bucket_count,
821
+ Hash const& hash = Hash(),
822
+ KeyEqual const& equal = KeyEqual(),
823
+ allocator_type const& alloc_or_container = allocator_type())
824
+ : m_values(alloc_or_container)
825
+ , m_hash(hash)
826
+ , m_equal(equal) {
827
+ if (0 != bucket_count) {
828
+ reserve(bucket_count);
829
+ }
830
+ }
831
+
832
+ table(size_t bucket_count, allocator_type const& alloc)
833
+ : table(bucket_count, Hash(), KeyEqual(), alloc) {}
834
+
835
+ table(size_t bucket_count, Hash const& hash, allocator_type const& alloc)
836
+ : table(bucket_count, hash, KeyEqual(), alloc) {}
837
+
838
+ explicit table(allocator_type const& alloc)
839
+ : table(0, Hash(), KeyEqual(), alloc) {}
840
+
841
+ template <class InputIt>
842
+ table(InputIt first,
843
+ InputIt last,
844
+ size_type bucket_count = 0,
845
+ Hash const& hash = Hash(),
846
+ KeyEqual const& equal = KeyEqual(),
847
+ allocator_type const& alloc = allocator_type())
848
+ : table(bucket_count, hash, equal, alloc) {
849
+ insert(first, last);
850
+ }
851
+
852
+ template <class InputIt>
853
+ table(InputIt first, InputIt last, size_type bucket_count, allocator_type const& alloc)
854
+ : table(first, last, bucket_count, Hash(), KeyEqual(), alloc) {}
855
+
856
+ template <class InputIt>
857
+ table(InputIt first, InputIt last, size_type bucket_count, Hash const& hash, allocator_type const& alloc)
858
+ : table(first, last, bucket_count, hash, KeyEqual(), alloc) {}
859
+
860
+ table(table const& other)
861
+ : table(other, other.m_values.get_allocator()) {}
862
+
863
+ table(table const& other, allocator_type const& alloc)
864
+ : m_values(other.m_values, alloc)
865
+ , m_max_load_factor(other.m_max_load_factor)
866
+ , m_hash(other.m_hash)
867
+ , m_equal(other.m_equal) {
868
+ copy_buckets(other);
869
+ }
870
+
871
+ table(table&& other) noexcept
872
+ : table(std::move(other), other.m_values.get_allocator()) {}
873
+
874
+ table(table&& other, allocator_type const& alloc) noexcept
875
+ : m_values(std::move(other.m_values), alloc)
876
+ , m_buckets(std::exchange(other.m_buckets, nullptr))
877
+ , m_num_buckets(std::exchange(other.m_num_buckets, 0))
878
+ , m_max_bucket_capacity(std::exchange(other.m_max_bucket_capacity, 0))
879
+ , m_max_load_factor(std::exchange(other.m_max_load_factor, default_max_load_factor))
880
+ , m_hash(std::exchange(other.m_hash, {}))
881
+ , m_equal(std::exchange(other.m_equal, {}))
882
+ , m_shifts(std::exchange(other.m_shifts, initial_shifts)) {
883
+ other.m_values.clear();
884
+ }
885
+
886
+ table(std::initializer_list<value_type> ilist,
887
+ size_t bucket_count = 0,
888
+ Hash const& hash = Hash(),
889
+ KeyEqual const& equal = KeyEqual(),
890
+ allocator_type const& alloc = allocator_type())
891
+ : table(bucket_count, hash, equal, alloc) {
892
+ insert(ilist);
893
+ }
894
+
895
+ table(std::initializer_list<value_type> ilist, size_type bucket_count, allocator_type const& alloc)
896
+ : table(ilist, bucket_count, Hash(), KeyEqual(), alloc) {}
897
+
898
+ table(std::initializer_list<value_type> init, size_type bucket_count, Hash const& hash, allocator_type const& alloc)
899
+ : table(init, bucket_count, hash, KeyEqual(), alloc) {}
900
+
901
+ ~table() {
902
+ if (nullptr != m_buckets) {
903
+ auto ba = bucket_alloc(m_values.get_allocator());
904
+ bucket_alloc_traits::deallocate(ba, m_buckets, bucket_count());
905
+ }
906
+ }
907
+
908
+ auto operator=(table const& other) -> table& {
909
+ if (&other != this) {
910
+ deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
911
+ m_values = other.m_values;
912
+ m_max_load_factor = other.m_max_load_factor;
913
+ m_hash = other.m_hash;
914
+ m_equal = other.m_equal;
915
+ m_shifts = initial_shifts;
916
+ copy_buckets(other);
917
+ }
918
+ return *this;
919
+ }
920
+
921
+ auto operator=(table&& other) noexcept(
922
+ noexcept(std::is_nothrow_move_assignable_v<value_container_type>&& std::is_nothrow_move_assignable_v<Hash>&&
923
+ std::is_nothrow_move_assignable_v<KeyEqual>)) -> table& {
924
+ if (&other != this) {
925
+ deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
926
+ m_values = std::move(other.m_values);
927
+ m_buckets = std::exchange(other.m_buckets, nullptr);
928
+ m_num_buckets = std::exchange(other.m_num_buckets, 0);
929
+ m_max_bucket_capacity = std::exchange(other.m_max_bucket_capacity, 0);
930
+ m_max_load_factor = std::exchange(other.m_max_load_factor, default_max_load_factor);
931
+ m_hash = std::exchange(other.m_hash, {});
932
+ m_equal = std::exchange(other.m_equal, {});
933
+ m_shifts = std::exchange(other.m_shifts, initial_shifts);
934
+ other.m_values.clear();
935
+ }
936
+ return *this;
937
+ }
938
+
939
+ auto operator=(std::initializer_list<value_type> ilist) -> table& {
940
+ clear();
941
+ insert(ilist);
942
+ return *this;
943
+ }
944
+
945
+ auto get_allocator() const noexcept -> allocator_type {
946
+ return m_values.get_allocator();
947
+ }
948
+
949
+ // iterators //////////////////////////////////////////////////////////////
950
+
951
+ auto begin() noexcept -> iterator {
952
+ return m_values.begin();
953
+ }
954
+
955
+ auto begin() const noexcept -> const_iterator {
956
+ return m_values.begin();
957
+ }
958
+
959
+ auto cbegin() const noexcept -> const_iterator {
960
+ return m_values.cbegin();
961
+ }
962
+
963
+ auto end() noexcept -> iterator {
964
+ return m_values.end();
965
+ }
966
+
967
+ auto cend() const noexcept -> const_iterator {
968
+ return m_values.cend();
969
+ }
970
+
971
+ auto end() const noexcept -> const_iterator {
972
+ return m_values.end();
973
+ }
974
+
975
+ // capacity ///////////////////////////////////////////////////////////////
976
+
977
+ [[nodiscard]] auto empty() const noexcept -> bool {
978
+ return m_values.empty();
979
+ }
980
+
981
+ [[nodiscard]] auto size() const noexcept -> size_t {
982
+ return m_values.size();
983
+ }
984
+
985
+ [[nodiscard]] static constexpr auto max_size() noexcept -> size_t {
986
+ if constexpr ((std::numeric_limits<value_idx_type>::max)() == (std::numeric_limits<size_t>::max)()) {
987
+ return size_t{1} << (sizeof(value_idx_type) * 8 - 1);
988
+ } else {
989
+ return size_t{1} << (sizeof(value_idx_type) * 8);
990
+ }
991
+ }
992
+
993
+ // modifiers //////////////////////////////////////////////////////////////
994
+
995
+ void clear() {
996
+ m_values.clear();
997
+ clear_buckets();
998
+ }
999
+
1000
+ auto insert(value_type const& value) -> std::pair<iterator, bool> {
1001
+ return emplace(value);
1002
+ }
1003
+
1004
+ auto insert(value_type&& value) -> std::pair<iterator, bool> {
1005
+ return emplace(std::move(value));
1006
+ }
1007
+
1008
+ template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true>
1009
+ auto insert(P&& value) -> std::pair<iterator, bool> {
1010
+ return emplace(std::forward<P>(value));
1011
+ }
1012
+
1013
+ auto insert(const_iterator /*hint*/, value_type const& value) -> iterator {
1014
+ return insert(value).first;
1015
+ }
1016
+
1017
+ auto insert(const_iterator /*hint*/, value_type&& value) -> iterator {
1018
+ return insert(std::move(value)).first;
1019
+ }
1020
+
1021
+ template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true>
1022
+ auto insert(const_iterator /*hint*/, P&& value) -> iterator {
1023
+ return insert(std::forward<P>(value)).first;
1024
+ }
1025
+
1026
+ template <class InputIt>
1027
+ void insert(InputIt first, InputIt last) {
1028
+ while (first != last) {
1029
+ insert(*first);
1030
+ ++first;
1031
+ }
1032
+ }
1033
+
1034
+ void insert(std::initializer_list<value_type> ilist) {
1035
+ insert(ilist.begin(), ilist.end());
1036
+ }
1037
+
1038
+ // nonstandard API: *this is emptied.
1039
+ // Also see "A Standard flat_map" https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p0429r9.pdf
1040
+ auto extract() && -> value_container_type {
1041
+ return std::move(m_values);
1042
+ }
1043
+
1044
+ // nonstandard API:
1045
+ // Discards the internally held container and replaces it with the one passed. Erases non-unique elements.
1046
+ auto replace(value_container_type&& container) {
1047
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(container.size() > max_size())) {
1048
+ on_error_too_many_elements();
1049
+ }
1050
+ auto shifts = calc_shifts_for_size(container.size());
1051
+ if (0 == m_num_buckets || shifts < m_shifts || container.get_allocator() != m_values.get_allocator()) {
1052
+ m_shifts = shifts;
1053
+ deallocate_buckets();
1054
+ allocate_buckets_from_shift();
1055
+ }
1056
+ clear_buckets();
1057
+
1058
+ m_values = std::move(container);
1059
+
1060
+ // can't use clear_and_fill_buckets_from_values() because container elements might not be unique
1061
+ auto value_idx = value_idx_type{};
1062
+
1063
+ // loop until we reach the end of the container. duplicated entries will be replaced with back().
1064
+ while (value_idx != static_cast<value_idx_type>(m_values.size())) {
1065
+ auto const& key = get_key(m_values[value_idx]);
1066
+
1067
+ auto hash = mixed_hash(key);
1068
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
1069
+ auto bucket_idx = bucket_idx_from_hash(hash);
1070
+
1071
+ bool key_found = false;
1072
+ while (true) {
1073
+ auto const& bucket = at(m_buckets, bucket_idx);
1074
+ if (dist_and_fingerprint > bucket.m_dist_and_fingerprint) {
1075
+ break;
1076
+ }
1077
+ if (dist_and_fingerprint == bucket.m_dist_and_fingerprint &&
1078
+ m_equal(key, m_values[bucket.m_value_idx].first)) {
1079
+ key_found = true;
1080
+ break;
1081
+ }
1082
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
1083
+ bucket_idx = next(bucket_idx);
1084
+ }
1085
+
1086
+ if (key_found) {
1087
+ if (value_idx != static_cast<value_idx_type>(m_values.size() - 1)) {
1088
+ m_values[value_idx] = std::move(m_values.back());
1089
+ }
1090
+ m_values.pop_back();
1091
+ } else {
1092
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
1093
+ ++value_idx;
1094
+ }
1095
+ }
1096
+ }
1097
+
1098
+ template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1099
+ auto insert_or_assign(Key const& key, M&& mapped) -> std::pair<iterator, bool> {
1100
+ return do_insert_or_assign(key, std::forward<M>(mapped));
1101
+ }
1102
+
1103
+ template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1104
+ auto insert_or_assign(Key&& key, M&& mapped) -> std::pair<iterator, bool> {
1105
+ return do_insert_or_assign(std::move(key), std::forward<M>(mapped));
1106
+ }
1107
+
1108
+ template <typename K,
1109
+ typename M,
1110
+ typename Q = T,
1111
+ typename H = Hash,
1112
+ typename KE = KeyEqual,
1113
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
1114
+ auto insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> {
1115
+ return do_insert_or_assign(std::forward<K>(key), std::forward<M>(mapped));
1116
+ }
1117
+
1118
+ template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1119
+ auto insert_or_assign(const_iterator /*hint*/, Key const& key, M&& mapped) -> iterator {
1120
+ return do_insert_or_assign(key, std::forward<M>(mapped)).first;
1121
+ }
1122
+
1123
+ template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1124
+ auto insert_or_assign(const_iterator /*hint*/, Key&& key, M&& mapped) -> iterator {
1125
+ return do_insert_or_assign(std::move(key), std::forward<M>(mapped)).first;
1126
+ }
1127
+
1128
+ template <typename K,
1129
+ typename M,
1130
+ typename Q = T,
1131
+ typename H = Hash,
1132
+ typename KE = KeyEqual,
1133
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
1134
+ auto insert_or_assign(const_iterator /*hint*/, K&& key, M&& mapped) -> iterator {
1135
+ return do_insert_or_assign(std::forward<K>(key), std::forward<M>(mapped)).first;
1136
+ }
1137
+
1138
+ // Single arguments for unordered_set can be used without having to construct the value_type
1139
+ template <class K,
1140
+ typename Q = T,
1141
+ typename H = Hash,
1142
+ typename KE = KeyEqual,
1143
+ std::enable_if_t<!is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
1144
+ auto emplace(K&& key) -> std::pair<iterator, bool> {
1145
+ if (is_full()) {
1146
+ increase_size();
1147
+ }
1148
+
1149
+ auto hash = mixed_hash(key);
1150
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
1151
+ auto bucket_idx = bucket_idx_from_hash(hash);
1152
+
1153
+ while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
1154
+ if (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
1155
+ m_equal(key, m_values[at(m_buckets, bucket_idx).m_value_idx])) {
1156
+ // found it, return without ever actually creating anything
1157
+ return {begin() + static_cast<difference_type>(at(m_buckets, bucket_idx).m_value_idx), false};
1158
+ }
1159
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
1160
+ bucket_idx = next(bucket_idx);
1161
+ }
1162
+
1163
+ // value is new, insert element first, so when exception happens we are in a valid state
1164
+ m_values.emplace_back(std::forward<K>(key));
1165
+ // now place the bucket and shift up until we find an empty spot
1166
+ auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
1167
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
1168
+ return {begin() + static_cast<difference_type>(value_idx), true};
1169
+ }
1170
+
1171
+ template <class... Args>
1172
+ auto emplace(Args&&... args) -> std::pair<iterator, bool> {
1173
+ if (is_full()) {
1174
+ increase_size();
1175
+ }
1176
+
1177
+ // we have to instantiate the value_type to be able to access the key.
1178
+ // 1. emplace_back the object so it is constructed. 2. If the key is already there, pop it later in the loop.
1179
+ auto& key = get_key(m_values.emplace_back(std::forward<Args>(args)...));
1180
+ auto hash = mixed_hash(key);
1181
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
1182
+ auto bucket_idx = bucket_idx_from_hash(hash);
1183
+
1184
+ while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
1185
+ if (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
1186
+ m_equal(key, get_key(m_values[at(m_buckets, bucket_idx).m_value_idx]))) {
1187
+ m_values.pop_back(); // value was already there, so get rid of it
1188
+ return {begin() + static_cast<difference_type>(at(m_buckets, bucket_idx).m_value_idx), false};
1189
+ }
1190
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
1191
+ bucket_idx = next(bucket_idx);
1192
+ }
1193
+
1194
+ // value is new, place the bucket and shift up until we find an empty spot
1195
+ auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
1196
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
1197
+
1198
+ return {begin() + static_cast<difference_type>(value_idx), true};
1199
+ }
1200
+
1201
+ template <class... Args>
1202
+ auto emplace_hint(const_iterator /*hint*/, Args&&... args) -> iterator {
1203
+ return emplace(std::forward<Args>(args)...).first;
1204
+ }
1205
+
1206
+ template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1207
+ auto try_emplace(Key const& key, Args&&... args) -> std::pair<iterator, bool> {
1208
+ return do_try_emplace(key, std::forward<Args>(args)...);
1209
+ }
1210
+
1211
+ template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1212
+ auto try_emplace(Key&& key, Args&&... args) -> std::pair<iterator, bool> {
1213
+ return do_try_emplace(std::move(key), std::forward<Args>(args)...);
1214
+ }
1215
+
1216
+ template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1217
+ auto try_emplace(const_iterator /*hint*/, Key const& key, Args&&... args) -> iterator {
1218
+ return do_try_emplace(key, std::forward<Args>(args)...).first;
1219
+ }
1220
+
1221
+ template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1222
+ auto try_emplace(const_iterator /*hint*/, Key&& key, Args&&... args) -> iterator {
1223
+ return do_try_emplace(std::move(key), std::forward<Args>(args)...).first;
1224
+ }
1225
+
1226
+ template <
1227
+ typename K,
1228
+ typename... Args,
1229
+ typename Q = T,
1230
+ typename H = Hash,
1231
+ typename KE = KeyEqual,
1232
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE> && is_neither_convertible_v<K&&, iterator, const_iterator>,
1233
+ bool> = true>
1234
+ auto try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> {
1235
+ return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...);
1236
+ }
1237
+
1238
+ template <
1239
+ typename K,
1240
+ typename... Args,
1241
+ typename Q = T,
1242
+ typename H = Hash,
1243
+ typename KE = KeyEqual,
1244
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE> && is_neither_convertible_v<K&&, iterator, const_iterator>,
1245
+ bool> = true>
1246
+ auto try_emplace(const_iterator /*hint*/, K&& key, Args&&... args) -> iterator {
1247
+ return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;
1248
+ }
1249
+
1250
+ auto erase(iterator it) -> iterator {
1251
+ auto hash = mixed_hash(get_key(*it));
1252
+ auto bucket_idx = bucket_idx_from_hash(hash);
1253
+
1254
+ auto const value_idx_to_remove = static_cast<value_idx_type>(it - cbegin());
1255
+ while (at(m_buckets, bucket_idx).m_value_idx != value_idx_to_remove) {
1256
+ bucket_idx = next(bucket_idx);
1257
+ }
1258
+
1259
+ do_erase(bucket_idx);
1260
+ return begin() + static_cast<difference_type>(value_idx_to_remove);
1261
+ }
1262
+
1263
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1264
+ auto erase(const_iterator it) -> iterator {
1265
+ return erase(begin() + (it - cbegin()));
1266
+ }
1267
+
1268
+ auto erase(const_iterator first, const_iterator last) -> iterator {
1269
+ auto const idx_first = first - cbegin();
1270
+ auto const idx_last = last - cbegin();
1271
+ auto const first_to_last = std::distance(first, last);
1272
+ auto const last_to_end = std::distance(last, cend());
1273
+
1274
+ // remove elements from left to right which moves elements from the end back
1275
+ auto const mid = idx_first + (std::min)(first_to_last, last_to_end);
1276
+ auto idx = idx_first;
1277
+ while (idx != mid) {
1278
+ erase(begin() + idx);
1279
+ ++idx;
1280
+ }
1281
+
1282
+ // all elements from the right are moved, now remove the last element until all done
1283
+ idx = idx_last;
1284
+ while (idx != mid) {
1285
+ --idx;
1286
+ erase(begin() + idx);
1287
+ }
1288
+
1289
+ return begin() + idx_first;
1290
+ }
1291
+
1292
+ auto erase(Key const& key) -> size_t {
1293
+ return do_erase_key(key);
1294
+ }
1295
+
1296
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
1297
+ auto erase(K&& key) -> size_t {
1298
+ return do_erase_key(std::forward<K>(key));
1299
+ }
1300
+
1301
+ void swap(table& other) noexcept(noexcept(std::is_nothrow_swappable_v<value_container_type>&&
1302
+ std::is_nothrow_swappable_v<Hash>&& std::is_nothrow_swappable_v<KeyEqual>)) {
1303
+ using std::swap;
1304
+ swap(other, *this);
1305
+ }
1306
+
1307
+ // lookup /////////////////////////////////////////////////////////////////
1308
+
1309
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1310
+ auto at(key_type const& key) -> Q& {
1311
+ return do_at(key);
1312
+ }
1313
+
1314
+ template <typename K,
1315
+ typename Q = T,
1316
+ typename H = Hash,
1317
+ typename KE = KeyEqual,
1318
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
1319
+ auto at(K const& key) -> Q& {
1320
+ return do_at(key);
1321
+ }
1322
+
1323
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1324
+ auto at(key_type const& key) const -> Q const& {
1325
+ return do_at(key);
1326
+ }
1327
+
1328
+ template <typename K,
1329
+ typename Q = T,
1330
+ typename H = Hash,
1331
+ typename KE = KeyEqual,
1332
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
1333
+ auto at(K const& key) const -> Q const& {
1334
+ return do_at(key);
1335
+ }
1336
+
1337
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1338
+ auto operator[](Key const& key) -> Q& {
1339
+ return try_emplace(key).first->second;
1340
+ }
1341
+
1342
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
1343
+ auto operator[](Key&& key) -> Q& {
1344
+ return try_emplace(std::move(key)).first->second;
1345
+ }
1346
+
1347
+ template <typename K,
1348
+ typename Q = T,
1349
+ typename H = Hash,
1350
+ typename KE = KeyEqual,
1351
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
1352
+ auto operator[](K&& key) -> Q& {
1353
+ return try_emplace(std::forward<K>(key)).first->second;
1354
+ }
1355
+
1356
+ auto count(Key const& key) const -> size_t {
1357
+ return find(key) == end() ? 0 : 1;
1358
+ }
1359
+
1360
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
1361
+ auto count(K const& key) const -> size_t {
1362
+ return find(key) == end() ? 0 : 1;
1363
+ }
1364
+
1365
+ auto find(Key const& key) -> iterator {
1366
+ return do_find(key);
1367
+ }
1368
+
1369
+ auto find(Key const& key) const -> const_iterator {
1370
+ return do_find(key);
1371
+ }
1372
+
1373
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
1374
+ auto find(K const& key) -> iterator {
1375
+ return do_find(key);
1376
+ }
1377
+
1378
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
1379
+ auto find(K const& key) const -> const_iterator {
1380
+ return do_find(key);
1381
+ }
1382
+
1383
+ auto contains(Key const& key) const -> bool {
1384
+ return find(key) != end();
1385
+ }
1386
+
1387
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
1388
+ auto contains(K const& key) const -> bool {
1389
+ return find(key) != end();
1390
+ }
1391
+
1392
+ auto equal_range(Key const& key) -> std::pair<iterator, iterator> {
1393
+ auto it = do_find(key);
1394
+ return {it, it == end() ? end() : it + 1};
1395
+ }
1396
+
1397
+ auto equal_range(const Key& key) const -> std::pair<const_iterator, const_iterator> {
1398
+ auto it = do_find(key);
1399
+ return {it, it == end() ? end() : it + 1};
1400
+ }
1401
+
1402
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
1403
+ auto equal_range(K const& key) -> std::pair<iterator, iterator> {
1404
+ auto it = do_find(key);
1405
+ return {it, it == end() ? end() : it + 1};
1406
+ }
1407
+
1408
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
1409
+ auto equal_range(K const& key) const -> std::pair<const_iterator, const_iterator> {
1410
+ auto it = do_find(key);
1411
+ return {it, it == end() ? end() : it + 1};
1412
+ }
1413
+
1414
+ // bucket interface ///////////////////////////////////////////////////////
1415
+
1416
+ auto bucket_count() const noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
1417
+ return m_num_buckets;
1418
+ }
1419
+
1420
+ static constexpr auto max_bucket_count() noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
1421
+ return max_size();
1422
+ }
1423
+
1424
+ // hash policy ////////////////////////////////////////////////////////////
1425
+
1426
+ [[nodiscard]] auto load_factor() const -> float {
1427
+ return bucket_count() ? static_cast<float>(size()) / static_cast<float>(bucket_count()) : 0.0F;
1428
+ }
1429
+
1430
+ [[nodiscard]] auto max_load_factor() const -> float {
1431
+ return m_max_load_factor;
1432
+ }
1433
+
1434
+ void max_load_factor(float ml) {
1435
+ m_max_load_factor = ml;
1436
+ if (m_num_buckets != max_bucket_count()) {
1437
+ m_max_bucket_capacity = static_cast<value_idx_type>(static_cast<float>(bucket_count()) * max_load_factor());
1438
+ }
1439
+ }
1440
+
1441
+ void rehash(size_t count) {
1442
+ count = (std::min)(count, max_size());
1443
+ auto shifts = calc_shifts_for_size((std::max)(count, size()));
1444
+ if (shifts != m_shifts) {
1445
+ m_shifts = shifts;
1446
+ deallocate_buckets();
1447
+ m_values.shrink_to_fit();
1448
+ allocate_buckets_from_shift();
1449
+ clear_and_fill_buckets_from_values();
1450
+ }
1451
+ }
1452
+
1453
+ void reserve(size_t capa) {
1454
+ capa = (std::min)(capa, max_size());
1455
+ if constexpr (has_reserve<value_container_type>) {
1456
+ // std::deque doesn't have reserve(). Make sure we only call when available
1457
+ m_values.reserve(capa);
1458
+ }
1459
+ auto shifts = calc_shifts_for_size((std::max)(capa, size()));
1460
+ if (0 == m_num_buckets || shifts < m_shifts) {
1461
+ m_shifts = shifts;
1462
+ deallocate_buckets();
1463
+ allocate_buckets_from_shift();
1464
+ clear_and_fill_buckets_from_values();
1465
+ }
1466
+ }
1467
+
1468
+ // observers //////////////////////////////////////////////////////////////
1469
+
1470
+ auto hash_function() const -> hasher {
1471
+ return m_hash;
1472
+ }
1473
+
1474
+ auto key_eq() const -> key_equal {
1475
+ return m_equal;
1476
+ }
1477
+
1478
+ // nonstandard API: expose the underlying values container
1479
+ [[nodiscard]] auto values() const noexcept -> value_container_type const& {
1480
+ return m_values;
1481
+ }
1482
+
1483
+ // non-member functions ///////////////////////////////////////////////////
1484
+
1485
+ friend auto operator==(table const& a, table const& b) -> bool {
1486
+ if (&a == &b) {
1487
+ return true;
1488
+ }
1489
+ if (a.size() != b.size()) {
1490
+ return false;
1491
+ }
1492
+ for (auto const& b_entry : b) {
1493
+ auto it = a.find(get_key(b_entry));
1494
+ if constexpr (is_map_v<T>) {
1495
+ // map: check that key is here, then also check that value is the same
1496
+ if (a.end() == it || !(b_entry.second == it->second)) {
1497
+ return false;
1498
+ }
1499
+ } else {
1500
+ // set: only check that the key is here
1501
+ if (a.end() == it) {
1502
+ return false;
1503
+ }
1504
+ }
1505
+ }
1506
+ return true;
1507
+ }
1508
+
1509
+ friend auto operator!=(table const& a, table const& b) -> bool {
1510
+ return !(a == b);
1511
+ }
1512
+ };
1513
+
1514
+ } // namespace detail
1515
+
1516
+ template <class Key,
1517
+ class T,
1518
+ class Hash = hash<Key>,
1519
+ class KeyEqual = std::equal_to<Key>,
1520
+ class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
1521
+ class Bucket = bucket_type::standard>
1522
+ using map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket>;
1523
+
1524
+ template <class Key,
1525
+ class Hash = hash<Key>,
1526
+ class KeyEqual = std::equal_to<Key>,
1527
+ class AllocatorOrContainer = std::allocator<Key>,
1528
+ class Bucket = bucket_type::standard>
1529
+ using set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket>;
1530
+
1531
+ # if ANKERL_UNORDERED_DENSE_PMR
1532
+
1533
+ namespace pmr {
1534
+
1535
+ template <class Key,
1536
+ class T,
1537
+ class Hash = hash<Key>,
1538
+ class KeyEqual = std::equal_to<Key>,
1539
+ class Bucket = bucket_type::standard>
1540
+ using map = detail::table<Key, T, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR_ALLOCATOR<std::pair<Key, T>>, Bucket>;
1541
+
1542
+ template <class Key, class Hash = hash<Key>, class KeyEqual = std::equal_to<Key>, class Bucket = bucket_type::standard>
1543
+ using set = detail::table<Key, void, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR_ALLOCATOR<Key>, Bucket>;
1544
+
1545
+ } // namespace pmr
1546
+
1547
+ # endif
1548
+
1549
+ // deduction guides ///////////////////////////////////////////////////////////
1550
+
1551
+ // deduction guides for alias templates are only possible since C++20
1552
+ // see https://en.cppreference.com/w/cpp/language/class_template_argument_deduction
1553
+
1554
+ } // namespace ANKERL_UNORDERED_DENSE_NAMESPACE
1555
+ } // namespace ankerl::unordered_dense
1556
+
1557
+ // std extensions /////////////////////////////////////////////////////////////
1558
+
1559
+ namespace std { // NOLINT(cert-dcl58-cpp)
1560
+
1561
+ template <class Key, class T, class Hash, class KeyEqual, class AllocatorOrContainer, class Bucket, class Pred>
1562
+ // NOLINTNEXTLINE(cert-dcl58-cpp)
1563
+ auto erase_if(ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket>& map, Pred pred)
1564
+ -> size_t {
1565
+ using map_t = ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket>;
1566
+
1567
+ // going back to front because erase() invalidates the end iterator
1568
+ auto const old_size = map.size();
1569
+ auto idx = old_size;
1570
+ while (idx) {
1571
+ --idx;
1572
+ auto it = map.begin() + static_cast<typename map_t::difference_type>(idx);
1573
+ if (pred(*it)) {
1574
+ map.erase(it);
1575
+ }
1576
+ }
1577
+
1578
+ return old_size - map.size(); // number of elements erased
1579
+ }
1580
+
1581
+ } // namespace std
1582
+
1583
+ #endif
1584
+ #endif
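The header above (ankerl::unordered_dense) is bundled as a densely stored drop-in replacement for std::unordered_map / std::unordered_set. A minimal usage sketch, not part of the vendored file, assuming the bundled ankerl/ directory is on the include path:

#include <cstdint>
#include <iostream>
#include <string>

#include <ankerl/unordered_dense.h>

int main() {
    // Key/value pairs live contiguously in one vector (m_values above),
    // so iteration is cache-friendly compared to node-based maps.
    ankerl::unordered_dense::map<std::string, uint64_t> counts;
    counts["apple"] += 1;               // operator[] goes through try_emplace
    counts.try_emplace("pear", 2);      // no-op if the key already exists
    counts.insert_or_assign("plum", 7); // overwrites an existing mapped value

    for (auto const& [word, n] : counts) {
        std::cout << word << " -> " << n << '\n';
    }

    // erase() back-fills the removed slot with the last element, so the
    // relative order of the remaining elements is not preserved.
    counts.erase("apple");

    // Custom hashers can declare `using is_avalanching = void;` to skip the
    // extra wyhash mixing step performed by mixed_hash() above.
    return 0;
}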
data/bundled_deps/avrdude/CMakeLists.txt ADDED
@@ -0,0 +1,107 @@
1
+ cmake_minimum_required(VERSION 3.0)
2
+
3
+ add_definitions(-D_BSD_SOURCE -D_DEFAULT_SOURCE) # To enable various useful macros and functions on Unices
4
+ remove_definitions(-D_UNICODE -DUNICODE)
5
+ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
6
+ set(CMAKE_C_STANDARD 99)
7
+ set(CMAKE_C_STANDARD_REQUIRED ON)
8
+
9
+ if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
10
+ # Workaround for an old CMake, which does not understand CMAKE_C_STANDARD.
11
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99 -Wall")
12
+ endif()
13
+
14
+
15
+ set(AVRDUDE_SOURCES
16
+ avrdude/arduino.c
17
+ avrdude/avr.c
18
+ # avrdude/avrftdi.c
19
+ # avrdude/avrftdi_tpi.c
20
+ avrdude/avrpart.c
21
+ avrdude/avr910.c
22
+ avrdude/bitbang.c
23
+ avrdude/buspirate.c
24
+ avrdude/butterfly.c
25
+ avrdude/config.c
26
+ avrdude/config_gram.c
27
+ # avrdude/confwin.c
28
+ avrdude/crc16.c
29
+ # avrdude/dfu.c
30
+ avrdude/fileio.c
31
+ # avrdude/flip1.c
32
+ # avrdude/flip2.c
33
+ # avrdude/ft245r.c
34
+ # avrdude/jtagmkI.c
35
+ # avrdude/jtagmkII.c
36
+ # avrdude/jtag3.c
37
+ avrdude/lexer.c
38
+ avrdude/linuxgpio.c
39
+ avrdude/lists.c
40
+ # avrdude/par.c
41
+ avrdude/pgm.c
42
+ avrdude/pgm_type.c
43
+ avrdude/pickit2.c
44
+ avrdude/pindefs.c
45
+ # avrdude/ppi.c
46
+ # avrdude/ppiwin.c
47
+ avrdude/safemode.c
48
+ avrdude/ser_avrdoper.c
49
+ avrdude/serbb_posix.c
50
+ avrdude/serbb_win32.c
51
+ avrdude/ser_posix.c
52
+ avrdude/ser_win32.c
53
+ avrdude/stk500.c
54
+ avrdude/stk500generic.c
55
+ avrdude/stk500v2.c
56
+ avrdude/term.c
57
+ avrdude/update.c
58
+ # avrdude/usbasp.c
59
+ # avrdude/usb_hidapi.c
60
+ # avrdude/usb_libusb.c
61
+ # avrdude/usbtiny.c
62
+ avrdude/wiring.c
63
+
64
+ avrdude/main.c
65
+ avrdude/avrdude-slic3r.hpp
66
+ avrdude/avrdude-slic3r.cpp
67
+ )
68
+ if (MSVC)
69
+ set(AVRDUDE_SOURCES ${AVRDUDE_SOURCES}
70
+ avrdude/windows/utf8.c
71
+ avrdude/windows/unistd.cpp
72
+ avrdude/windows/getopt.c
73
+ )
74
+ elseif (MINGW)
75
+ set(AVRDUDE_SOURCES ${AVRDUDE_SOURCES}
76
+ avrdude/windows/utf8.c
77
+ )
78
+ endif()
79
+
80
+ include(bin2h)
81
+
82
+ bin2h(
83
+ SOURCE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/avrdude/avrdude-slic3r.conf
84
+ VARIABLE_NAME avrdude_slic3r_conf
85
+ HEADER_FILE ${CMAKE_CURRENT_BINARY_DIR}/avrdude-slic3r.conf.h
86
+ ADD_WARNING_TEXT
87
+ )
88
+
89
+ add_library(avrdude STATIC ${AVRDUDE_SOURCES})
90
+ target_link_libraries(avrdude PRIVATE localesutils)
91
+
92
+ add_executable(avrdude-slic3r avrdude/main-standalone.cpp)
93
+ target_link_libraries(avrdude-slic3r avrdude)
94
+
95
+ encoding_check(avrdude)
96
+ encoding_check(avrdude-slic3r)
97
+
98
+ # Make avrdude-slic3r.conf.h includable:
99
+ target_include_directories(avrdude SYSTEM PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
100
+ target_include_directories(avrdude PUBLIC .)
101
+
102
+ if (WIN32)
103
+ target_compile_definitions(avrdude PRIVATE WIN32NATIVE=1)
104
+ if(MSVC)
105
+ target_include_directories(avrdude SYSTEM PRIVATE avrdude/windows) # So that sources find the getopt.h windows drop-in
106
+ endif(MSVC)
107
+ endif()
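The bin2h() call above embeds avrdude/avrdude-slic3r.conf into a header generated in the build tree, which is why ${CMAKE_CURRENT_BINARY_DIR} is added as a SYSTEM include directory for the avrdude target. A hedged sketch of how a source file in that target could pick up the embedded bytes; the array name follows VARIABLE_NAME above, while the exact declaration bin2h emits (element type, complete array size) is an assumption:

#include <string>

#include "avrdude-slic3r.conf.h" // generated by bin2h() into the build dir

// Hypothetical helper: expose the embedded avrdude configuration as a string,
// e.g. so it can be written to a temporary file and handed to avrdude via -C.
// Assumes the generated header defines `avrdude_slic3r_conf` as a complete
// (sized) byte array.
static std::string embedded_avrdude_conf()
{
    return std::string(reinterpret_cast<const char*>(avrdude_slic3r_conf),
                       sizeof(avrdude_slic3r_conf));
}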
data/bundled_deps/avrdude/avrdude/AUTHORS ADDED
@@ -0,0 +1,28 @@
1
+ AVRDUDE was written by:
2
+
3
+ Brian S. Dean <[email protected]>
4
+
5
+ Contributors:
6
+
7
+ Joerg Wunsch <[email protected]>
8
+ Eric Weddington <[email protected]>
9
+ Jan-Hinnerk Reichert <[email protected]>
10
+ Alex Shepherd <[email protected]>
11
+ Martin Thomas <[email protected]>
12
+ Theodore A. Roth <[email protected]>
13
+ Michael Holzt <[email protected]>
14
+ Colin O'Flynn <[email protected]>
15
+ Thomas Fischl <[email protected]>
16
+ David Hoerl <[email protected]>
17
+ Michal Ludvig <[email protected]>
18
+ Darell Tan <[email protected]>
19
+ Wolfgang Moser
20
+ Ville Voipio
21
+ Hannes Weisbach
22
+ Doug Springer
23
+ Brett Hagman <[email protected]>
24
+ Rene Liebscher <[email protected]>
25
+ Jim Paris <[email protected]>
26
+
27
+ For minor contributions, please see the ChangeLog files.
28
+
data/bundled_deps/avrdude/avrdude/BUILD-FROM-SVN ADDED
@@ -0,0 +1,13 @@
1
+ $Id$
2
+
3
+ How to build avrdude from SVN:
4
+
5
+ 1. svn co svn://svn.savannah.nongnu.org/avrdude/trunk
6
+
7
+ 2. cd trunk/avrdude
8
+
9
+ 3. ./bootstrap
10
+
11
+ 4. ./configure
12
+
13
+ 5. make