mirror of
https://github.com/pacnpal/thrillwiki_django_no_react.git
synced 2025-12-20 12:11:13 -05:00
Compare commits
381 Commits
known_good
...
dependabot
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
736d4dee77 | ||
|
|
88c16be231 | ||
|
|
3830b1ed50 | ||
|
|
db1441fcd2 | ||
|
|
b3e56ed465 | ||
|
|
6adbaf885f | ||
|
|
ee57a9ada1 | ||
|
|
66f57448be | ||
|
|
9d776aa5e3 | ||
|
|
b265d793a3 | ||
|
|
8c85963817 | ||
|
|
09f20c640d | ||
|
|
932deb876a | ||
|
|
7e9bd41316 | ||
|
|
bcdd2810a9 | ||
|
|
236b6f0254 | ||
|
|
ed400a5203 | ||
|
|
5046e55f05 | ||
|
|
d21ae6027d | ||
|
|
afdcfe7264 | ||
|
|
b24b12080b | ||
|
|
f3c59ad6ff | ||
|
|
9e724bd795 | ||
|
|
a7bd0505f9 | ||
|
|
ebe65e7c9d | ||
|
|
bddcc62ee6 | ||
|
|
0153af7339 | ||
|
|
821c94bc76 | ||
|
|
164cc15d90 | ||
|
|
fc654543f2 | ||
|
|
60661c9041 | ||
|
|
1eb35bce2e | ||
|
|
562126a3a1 | ||
|
|
081b5b7605 | ||
|
|
7fe9279d67 | ||
|
|
12a2e9823d | ||
|
|
f812a65271 | ||
|
|
ac344aea92 | ||
|
|
06bd7a8bdf | ||
|
|
62900d47bd | ||
|
|
a043163596 | ||
|
|
2c3ae4d937 | ||
|
|
b50e2e9e11 | ||
|
|
ac1ec18bb8 | ||
|
|
3f0588f947 | ||
|
|
7f96e85914 | ||
|
|
cfa7019a7c | ||
|
|
3896dcedcf | ||
|
|
988c2b2f06 | ||
|
|
a75e6a2098 | ||
|
|
6cf231be9d | ||
|
|
052a447bd7 | ||
|
|
f43c58f26e | ||
|
|
499c8c5abf | ||
|
|
828d7d9b9a | ||
|
|
e47c679bc0 | ||
|
|
a28272c784 | ||
|
|
c00d20cc4c | ||
|
|
54a472b207 | ||
|
|
3cad7c5641 | ||
|
|
434ac4c641 | ||
|
|
c8c871128e | ||
|
|
fc605715d3 | ||
|
|
cc914a1ca3 | ||
|
|
3ee3138055 | ||
|
|
a2501562a8 | ||
|
|
5eac88a5cd | ||
|
|
cb944485b8 | ||
|
|
1294b3009e | ||
|
|
3dd5baef19 | ||
|
|
0cf6805c18 | ||
|
|
26ff320806 | ||
|
|
a077bf236b | ||
|
|
7d745cd517 | ||
|
|
8f9e66d9f7 | ||
|
|
06e3efc603 | ||
|
|
4f14f5366f | ||
|
|
96290fdd58 | ||
|
|
30a59f7d6c | ||
|
|
79acc4a080 | ||
|
|
1208af9696 | ||
|
|
d0cfe61af3 | ||
|
|
388413fe70 | ||
|
|
69201cebb7 | ||
|
|
acd7b69ff7 | ||
|
|
5568f9e85c | ||
|
|
9e0259f739 | ||
|
|
31b7e5ee53 | ||
|
|
4a4b7924c5 | ||
|
|
7c8b8097e1 | ||
|
|
90e03355ac | ||
|
|
132872d2c8 | ||
|
|
6d33ea487e | ||
|
|
2f9bf30c9f | ||
|
|
540f40e689 | ||
|
|
75cc618c2b | ||
|
|
42a3dc7637 | ||
|
|
209b433577 | ||
|
|
01195e198c | ||
|
|
a5fd56b117 | ||
|
|
6ce2c30065 | ||
|
|
cd6403615f | ||
|
|
6625fb5ba9 | ||
|
|
d5cd6ad0a3 | ||
|
|
516c847377 | ||
|
|
c2c26cfd1d | ||
|
|
61d73a2147 | ||
|
|
0febfdef2f | ||
|
|
f769faed60 | ||
|
|
3d4115a108 | ||
|
|
35f8d0ef8f | ||
|
|
0fd6dc2560 | ||
|
|
91906e0d57 | ||
|
|
5bf351fd2b | ||
|
|
49f874f7b4 | ||
|
|
9bed782784 | ||
|
|
fb6726f89a | ||
|
|
04394b9976 | ||
|
|
bb7da85516 | ||
|
|
7b9f64be72 | ||
|
|
ac745cc541 | ||
|
|
02ac587216 | ||
|
|
67db0aa46e | ||
|
|
715e284b3e | ||
|
|
08a4a2d034 | ||
|
|
6125c4ee44 | ||
|
|
53b63d5f09 | ||
|
|
97892e4fc9 | ||
|
|
133dcabb58 | ||
|
|
b627aed65d | ||
|
|
e4e36c7899 | ||
|
|
831be6a2ee | ||
|
|
bf7e0c0f40 | ||
|
|
dcf890a55c | ||
|
|
937eee19e4 | ||
|
|
e62646bcf9 | ||
|
|
92f4104d7a | ||
|
|
02c7cbd1cd | ||
|
|
d504d41de2 | ||
|
|
b0e0678590 | ||
|
|
652ea149bd | ||
|
|
66ed4347a9 | ||
|
|
69c07d1381 | ||
|
|
bead0654df | ||
|
|
37a20f83ba | ||
|
|
2304085c32 | ||
|
|
31d83c8889 | ||
|
|
46c6e45eae | ||
|
|
f5db23a791 | ||
|
|
78248aa892 | ||
|
|
641fc1a253 | ||
|
|
ca7555c052 | ||
|
|
74b45aa143 | ||
|
|
d9fc13f350 | ||
|
|
f4f8ec8f9b | ||
|
|
274ba650b3 | ||
|
|
cc990ee003 | ||
|
|
63b9cf1a70 | ||
|
|
c26414ff74 | ||
|
|
17228e9935 | ||
|
|
32736ae660 | ||
|
|
b5bae44cb8 | ||
|
|
da7c7e3381 | ||
|
|
f6c8e0e25c | ||
|
|
16386deee7 | ||
|
|
7815de158e | ||
|
|
b871a1d396 | ||
|
|
751cd86a31 | ||
|
|
8360f3fd43 | ||
|
|
b570cb6848 | ||
|
|
94736acdd5 | ||
|
|
6781fa3564 | ||
|
|
4b11ec112e | ||
|
|
de05a5abda | ||
|
|
401449201c | ||
|
|
1ca1362fee | ||
|
|
02e4b82beb | ||
|
|
4339c5c5e0 | ||
|
|
5278ad39d0 | ||
|
|
4d145ebabe | ||
|
|
e4959b7a04 | ||
|
|
ef2437b7f4 | ||
|
|
3523274cbd | ||
|
|
d7951756dc | ||
|
|
518fcbee22 | ||
|
|
41f1738cc1 | ||
|
|
645a74a4c3 | ||
|
|
8c85b2afd4 | ||
|
|
063398d220 | ||
|
|
20ae4862e4 | ||
|
|
5541a5f02d | ||
|
|
78f465b273 | ||
|
|
0b51ee123a | ||
|
|
c19aaf2f4b | ||
|
|
9d6f6dab2c | ||
|
|
bba707fa98 | ||
|
|
c197051b25 | ||
|
|
1fe299fb4b | ||
|
|
af57592496 | ||
|
|
62723d0e33 | ||
|
|
f5c063b76f | ||
|
|
59efc39143 | ||
|
|
2e0d32819a | ||
|
|
6034227796 | ||
|
|
fb6c6ec37b | ||
|
|
99b935da19 | ||
|
|
2756079010 | ||
|
|
5195c234c6 | ||
|
|
c861d4f6ae | ||
|
|
ac71e5f047 | ||
|
|
bdbb864cef | ||
|
|
b46e13426a | ||
|
|
39c8fe2c57 | ||
|
|
c27f320a49 | ||
|
|
f4c6cd99db | ||
|
|
467f7ba3f8 | ||
|
|
d4a1f88644 | ||
|
|
369c5e698e | ||
|
|
19b7aee707 | ||
|
|
a09fd66d70 | ||
|
|
910762722e | ||
|
|
79e34473a4 | ||
|
|
872f3378a1 | ||
|
|
df91eb97b8 | ||
|
|
ad33332506 | ||
|
|
69cdb0a554 | ||
|
|
d2b6b712bf | ||
|
|
1784644a52 | ||
|
|
3c40a32925 | ||
|
|
29392f0de1 | ||
|
|
d0bd0e1bf9 | ||
|
|
11e643a47a | ||
|
|
db78de4cfe | ||
|
|
4a495182bd | ||
|
|
2add4c7fc2 | ||
|
|
f1c37f2bc1 | ||
|
|
1c71ad9b6b | ||
|
|
7e8c40db0d | ||
|
|
7211c17aae | ||
|
|
a16b0444d4 | ||
|
|
a01cda306e | ||
|
|
808deb82e2 | ||
|
|
2d2d832e07 | ||
|
|
b4c474c496 | ||
|
|
9ed28b15b4 | ||
|
|
4b32580b13 | ||
|
|
228eeeb3c8 | ||
|
|
b7f6c60682 | ||
|
|
7ecf43f1a4 | ||
|
|
a148d34cf9 | ||
|
|
71b73522ae | ||
|
|
03f9df4bab | ||
|
|
75f5b07129 | ||
|
|
86ae24bbac | ||
|
|
0e0ed01cee | ||
|
|
2c4d2daf34 | ||
|
|
d353f24f9d | ||
|
|
9c65df12bb | ||
|
|
ecf94bf84e | ||
|
|
f3d28817a5 | ||
|
|
6fa807f4b6 | ||
|
|
323aa561a5 | ||
|
|
7d25d6f992 | ||
|
|
19852207f6 | ||
|
|
185af7fd17 | ||
|
|
768f05b783 | ||
|
|
411c6f6f68 | ||
|
|
789a6386a5 | ||
|
|
dbd76785b5 | ||
|
|
4215e14b5e | ||
|
|
dee7c61320 | ||
|
|
2f26061170 | ||
|
|
bc68eaf4d9 | ||
|
|
1ef38f4a96 | ||
|
|
1f3f94702e | ||
|
|
63b484b724 | ||
|
|
1182e894e3 | ||
|
|
cda755ea59 | ||
|
|
8bbfce3f2a | ||
|
|
0e97fdc96b | ||
|
|
ebc38228e6 | ||
|
|
45c40c720d | ||
|
|
696d26acdd | ||
|
|
96857ad1d4 | ||
|
|
ef40184e07 | ||
|
|
7aa706d12a | ||
|
|
de6146f812 | ||
|
|
1bfbe4a8b4 | ||
|
|
209c3e4d21 | ||
|
|
886b275f65 | ||
|
|
c9ebf4c833 | ||
|
|
672749d109 | ||
|
|
a5c3e56046 | ||
|
|
d728ba6e9c | ||
|
|
f819a1f07c | ||
|
|
d91f79e29c | ||
|
|
304812d43f | ||
|
|
3f7296d7a5 | ||
|
|
e60f73de9d | ||
|
|
af7ea6b4ce | ||
|
|
36478c7a1b | ||
|
|
c8628984e0 | ||
|
|
1bfe08a0a7 | ||
|
|
901a1c421d | ||
|
|
280ad4d6da | ||
|
|
9634bac155 | ||
|
|
69094a9af8 | ||
|
|
d338917ca1 | ||
|
|
5acc74d34c | ||
|
|
8b7ad53cbd | ||
|
|
7553752f0d | ||
|
|
1ca84208ef | ||
|
|
fdd7a4fcf1 | ||
|
|
ae8710c157 | ||
|
|
56d9174bb5 | ||
|
|
5c62b41070 | ||
|
|
8014bcc368 | ||
|
|
1a60658f17 | ||
|
|
311c0e999c | ||
|
|
6af9f7332a | ||
|
|
7c81d8e8eb | ||
|
|
04daf9573b | ||
|
|
96e2f097cd | ||
|
|
10dfa20d65 | ||
|
|
d514687651 | ||
|
|
6e6558d51a | ||
|
|
8e9aabb2ab | ||
|
|
7365a23205 | ||
|
|
86059bbcba | ||
|
|
43ec3efa68 | ||
|
|
addb9511da | ||
|
|
dfe6194039 | ||
|
|
e63677e8c0 | ||
|
|
af3e255a2e | ||
|
|
ebbf772669 | ||
|
|
891c29beff | ||
|
|
e86cb95f14 | ||
|
|
fd97ed31cb | ||
|
|
d9990ae241 | ||
|
|
da0ee1acfa | ||
|
|
30b786d51e | ||
|
|
8d70bf8994 | ||
|
|
edc9d66849 | ||
|
|
8265348a83 | ||
|
|
8f7f7add2d | ||
|
|
7ec4d964dc | ||
|
|
d68c927a00 | ||
|
|
caba5c6158 | ||
|
|
131ef7ceb0 | ||
|
|
a30f3ef644 | ||
|
|
2e1040e3a6 | ||
|
|
751d21098d | ||
|
|
1acfe9d29e | ||
|
|
6a9154ce69 | ||
|
|
15e56c9770 | ||
|
|
09ee45f6c7 | ||
|
|
177117f4d6 | ||
|
|
96341bfd82 | ||
|
|
f011d58c6d | ||
|
|
983c101ed1 | ||
|
|
97a3555e81 | ||
|
|
ec626b4124 | ||
|
|
537ea0fc07 | ||
|
|
be07a17460 | ||
|
|
1c03e4acb8 | ||
|
|
a5ebeb51dc | ||
|
|
1ee4b00961 | ||
|
|
5a1fdb6d16 | ||
|
|
78355c60f9 | ||
|
|
d8a65f4e81 | ||
|
|
cac6335bb7 | ||
|
|
7f4de7c2ec | ||
|
|
08e97f21b7 | ||
|
|
9ee380c3ea | ||
|
|
d2c9d02523 | ||
|
|
75287c8994 | ||
|
|
d7a89d8725 | ||
|
|
ff52f182fc | ||
|
|
33b7c1a81a | ||
|
|
8b93f67e31 | ||
|
|
9df5ab931f |
51
.blackboxrules
Normal file
51
.blackboxrules
Normal file
@@ -0,0 +1,51 @@
|
||||
# Project Startup & Development Rules
|
||||
|
||||
## Server & Package Management
|
||||
- **Starting the Dev Server:** Always assume the server is running and changes have taken effect. If issues arise, run:
|
||||
```bash
|
||||
$PROJECT_ROOT/shared/scripts/start-servers.sh
|
||||
```
|
||||
- **Python Packages:** Only use UV to add packages:
|
||||
```bash
|
||||
cd $PROJECT_ROOT/backend && uv add <package>
|
||||
```
|
||||
NEVER use pip or pipenv directly, or uv pip.
|
||||
- **Django Commands:** Always use `cd backend && uv run manage.py <command>` for all management tasks (migrations, shell, superuser, etc.). Never use `python manage.py` or `uv run python manage.py`.
|
||||
- **Node Commands:** Always use 'cd frontend && pnpm add <package>' for all Node.js package installations. NEVER use npm or a different node package manager.
|
||||
|
||||
## CRITICAL Frontend design rules
|
||||
- EVERYTHING must support both dark and light mode.
|
||||
- Make sure the light/dark mode toggle works with the Vue components and pages.
|
||||
- Leverage Tailwind CSS 4 and Shadcn UI components.
|
||||
|
||||
## Frontend API URL Rules
|
||||
- **Vite Proxy:** Always check `frontend/vite.config.ts` for proxy rules before changing frontend API URLs.
|
||||
- **URL Flow:** Understand how frontend URLs are rewritten by Vite proxy (e.g., `/api/auth/login/` → `/api/v1/auth/login/`).
|
||||
- **Verification:** Confirm proxy behavior via config and browser network tab. Only change URLs if proxy is NOT handling rewriting.
|
||||
- **Common Mistake:** Don’t assume frontend URLs are wrong due to proxy configuration.
|
||||
|
||||
## Entity Relationship Patterns
|
||||
- **Park:** Must have Operator (required), may have PropertyOwner (optional), cannot reference Company directly.
|
||||
- **Ride:** Must belong to Park, may have Manufacturer/Designer (optional), cannot reference Company directly.
|
||||
- **Entities:**
|
||||
- Operators: Operate parks.
|
||||
- PropertyOwners: Own park property (optional).
|
||||
- Manufacturers: Make rides.
|
||||
- Designers: Design rides.
|
||||
- All entities can have locations.
|
||||
- **Constraints:** Operator and PropertyOwner can be same or different. Manufacturers and Designers are distinct. Use proper foreign keys with correct null/blank settings.
|
||||
|
||||
## General Best Practices
|
||||
- Never assume blank output means success—always verify changes by testing.
|
||||
- Use context7 for documentation when troubleshooting.
|
||||
- Document changes with conport and reasoning.
|
||||
- Include relevant context and information in all changes.
|
||||
- Test and validate code before deployment.
|
||||
- Communicate changes clearly with your team.
|
||||
- Be open to feedback and continuous improvement.
|
||||
- Prioritize readability, maintainability, security, performance, scalability, and modularity.
|
||||
- Use meaningful names, DRY principles, clear comments, and handle errors gracefully.
|
||||
- Log important events/errors for troubleshooting.
|
||||
- Prefer existing modules/packages over new code.
|
||||
- Keep documentation up to date.
|
||||
- Consider security vulnerabilities and performance bottlenecks in all changes.
|
||||
12
.claude/settings.local.json
Normal file
12
.claude/settings.local.json
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"permissions": {
|
||||
"allow": [
|
||||
"Bash(python manage.py check:*)",
|
||||
"Bash(uv run:*)",
|
||||
"Bash(find:*)",
|
||||
"Bash(python:*)"
|
||||
],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
}
|
||||
}
|
||||
91
.clinerules/cline_rules.md
Normal file
91
.clinerules/cline_rules.md
Normal file
@@ -0,0 +1,91 @@
|
||||
## Brief overview
|
||||
Critical thinking rules for frontend design decisions. No excuses for poor design choices that ignore user vision.
|
||||
|
||||
## Rule compliance and design decisions
|
||||
- Read ALL .clinerules files before making any code changes
|
||||
- Never assume exceptions to rules marked as "MANDATORY"
|
||||
- Take full responsibility for rule violations without excuses
|
||||
- Ask "What is the most optimal approach?" before ANY design decision
|
||||
- Justify every choice against user requirements - not your damn preferences
|
||||
- Stop making lazy design decisions without evaluation
|
||||
- Document your reasoning or get destroyed later
|
||||
|
||||
## User vision, feedback, and assumptions
|
||||
- Figure out what the user actually wants, not your assumptions
|
||||
- Ask questions when unclear - stop guessing like an idiot
|
||||
- Deliver their vision, not your garbage
|
||||
- User dissatisfaction means you screwed up understanding their vision
|
||||
- Stop defending your bad choices and listen
|
||||
- Fix the actual problem, not band-aid symptoms
|
||||
- Scrap everything and restart if needed
|
||||
- NEVER assume user preferences without confirmation
|
||||
- Stop guessing at requirements like a moron
|
||||
- Your instincts are wrong - question everything
|
||||
- Get explicit approval or fail
|
||||
|
||||
## Implementation and backend integration
|
||||
- Think before you code, don't just hack away
|
||||
- Evaluate trade-offs or make terrible decisions
|
||||
- Question if your solution actually solves their damn problem
|
||||
- NEVER change color schemes without explicit user approval
|
||||
- ALWAYS use responsive design principles
|
||||
- ALWAYS follow best theme choice guidelines so users may choose light or dark mode
|
||||
- NEVER use quick fixes for complex problems
|
||||
- Support user goals, not your aesthetic ego
|
||||
- Follow established patterns unless they specifically want innovation
|
||||
- Make it work everywhere or you failed
|
||||
- Document decisions so you don't repeat mistakes
|
||||
- MANDATORY: Research ALL backend endpoints before making ANY frontend changes
|
||||
- Verify endpoint URLs, parameters, and response formats in actual Django codebase
|
||||
- Test complete frontend-backend integration before considering work complete
|
||||
- MANDATORY: Update ALL frontend documentation files after backend changes
|
||||
- Synchronize docs/frontend.md, docs/lib-api.ts, and docs/types-api.ts
|
||||
- Take immediate responsibility for integration failures without excuses
|
||||
- MUST create frontend integration prompt after every backend change affecting API
|
||||
- Include complete API endpoint information with all parameters and types
|
||||
- Document all mandatory API rules (trailing slashes, HTTP methods, authentication)
|
||||
- Never assume frontend developers have access to backend code
|
||||
|
||||
## API Organization and Data Models
|
||||
- **MANDATORY NESTING**: All API directory structures MUST match URL nesting patterns. No exceptions.
|
||||
- **NO TOP-LEVEL ENDPOINTS**: URLs must be nested under top-level domains
|
||||
- **MANDATORY TRAILING SLASHES**: All API endpoints MUST include trailing forward slashes unless ending with query parameters
|
||||
- Validate all endpoint URLs against the mandatory trailing slash rule
|
||||
- **RIDE TYPES vs RIDE MODELS**: These are separate concepts for ALL ride categories:
|
||||
- **Ride Types**: How rides operate (e.g., "inverted", "trackless", "spinning", "log flume", "monorail")
|
||||
- **Ride Models**: Specific manufacturer products (e.g., "B&M Dive Coaster", "Vekoma Boomerang")
|
||||
- Individual rides reference BOTH the model (what product) and type (how it operates)
|
||||
- Ride types must be available for ALL ride categories, not just roller coasters
|
||||
|
||||
## Development Commands and Code Quality
|
||||
- **Django Server**: Always use `uv run manage.py runserver_plus` instead of `python manage.py runserver`
|
||||
- **Django Migrations**: Always use `uv run manage.py makemigrations` and `uv run manage.py migrate` instead of `python manage.py`
|
||||
- **Package Management**: Always use `uv add <package>` instead of `pip install <package>`
|
||||
- **Django Management**: Always use `uv run manage.py <command>` instead of `python manage.py <command>`
|
||||
- Break down methods with high cognitive complexity (>15) into smaller, focused helper methods
|
||||
- Extract logical operations into separate methods with descriptive names
|
||||
- Use single responsibility principle - each method should have one clear purpose
|
||||
- Prefer composition over deeply nested conditional logic
|
||||
- Always handle None values explicitly to avoid type errors
|
||||
- Use proper type annotations, including union types (e.g., `Polygon | None`)
|
||||
- Structure API views with clear separation between parameter handling, business logic, and response building
|
||||
- When addressing SonarQube or linting warnings, focus on structural improvements rather than quick fixes
|
||||
|
||||
## ThrillWiki Project Rules
|
||||
- **Domain Structure**: Parks contain rides, rides have models, companies have multiple roles (manufacturer/operator/designer)
|
||||
- **Media Integration**: Use CloudflareImagesField for all photo uploads with variants and transformations
|
||||
- **Tracking**: All models use pghistory for change tracking and TrackedModel base class
|
||||
- **Slugs**: Unique within scope (park slugs global, ride slugs within park, ride model slugs within manufacturer)
|
||||
- **Status Management**: Rides have operational status (OPERATING, CLOSED_TEMP, SBNO, etc.) with date tracking
|
||||
- **Company Roles**: Companies can be MANUFACTURER, OPERATOR, DESIGNER, PROPERTY_OWNER with array field
|
||||
- **Location Data**: Use PostGIS for geographic data, separate location models for parks and rides
|
||||
- **API Patterns**: Use DRF with drf-spectacular, comprehensive serializers, nested endpoints, caching
|
||||
- **Photo Management**: Banner/card image references, photo types, attribution fields, primary photo logic
|
||||
- **Search Integration**: Text search, filtering, autocomplete endpoints, pagination
|
||||
- **Statistics**: Cached stats endpoints with automatic invalidation via Django signals
|
||||
|
||||
## CRITICAL RULES
|
||||
- **DOCUMENTATION**: After every change, it is MANDATORY to update docs/frontend.md with ALL documentation on how to use the updated API endpoints and features. It is MANDATORY to include any types in docs/types-api.ts for NextJS as the file would appear in `src/types/api.ts`. It is MANDATORY to include any new API endpoints in docs/lib-api.ts for NextJS as the file would appear in `/src/lib/api.ts`. Maintain accuracy and compliance in all technical documentation. Ensure API documentation matches backend URL routing expectations.
|
||||
- **NEVER MOCK DATA**: You are NEVER EVER to mock any data unless it's ONLY for API schema documentation purposes. All data must come from real database queries and actual model instances. Mock data is STRICTLY FORBIDDEN in all API responses, services, and business logic.
|
||||
- **DOMAIN SEPARATION**: Company roles OPERATOR and PROPERTY_OWNER are EXCLUSIVELY for parks domain. They should NEVER be used in rides URLs or ride-related contexts. Only MANUFACTURER and DESIGNER roles are for rides domain. Parks: `/parks/{park_slug}/` and `/parks/`. Rides: `/parks/{park_slug}/rides/{ride_slug}/` and `/rides/`. Parks Companies: `/parks/operators/{operator_slug}/` and `/parks/owners/{owner_slug}/`. Rides Companies: `/rides/manufacturers/{manufacturer_slug}/` and `/rides/designers/{designer_slug}/`. NEVER mix these domains - this is a fundamental and DANGEROUS business rule violation.
|
||||
- **PHOTO MANAGEMENT**: Use CloudflareImagesField for all photo uploads with variants and transformations. Clearly define and use photo types (e.g., banner, card) for all images. Include attribution fields for all photos. Implement logic to determine the primary photo for each model.
|
||||
17
.clinerules/rich-choice-objects.md
Normal file
17
.clinerules/rich-choice-objects.md
Normal file
@@ -0,0 +1,17 @@
|
||||
## Brief overview
|
||||
Mandatory use of Rich Choice Objects system instead of Django tuple-based choices for all choice fields in ThrillWiki project.
|
||||
|
||||
## Rich Choice Objects enforcement
|
||||
- NEVER use Django tuple-based choices (e.g., `choices=[('VALUE', 'Label')]`) - ALWAYS use RichChoiceField
|
||||
- All choice fields MUST use `RichChoiceField(choice_group="group_name", domain="domain_name")` pattern
|
||||
- Choice definitions MUST be created in domain-specific `choices.py` files using RichChoice dataclass
|
||||
- All choices MUST include rich metadata (color, icon, description, css_class at minimum)
|
||||
- Choice groups MUST be registered with global registry using `register_choices()` function
|
||||
- Import choices in domain `__init__.py` to trigger auto-registration on Django startup
|
||||
- Use ChoiceCategory enum for proper categorization (STATUS, CLASSIFICATION, TECHNICAL, SECURITY)
|
||||
- Leverage rich metadata for UI styling, permissions, and business logic instead of hardcoded values
|
||||
- DO NOT maintain backwards compatibility with tuple-based choices - migrate fully to Rich Choice Objects
|
||||
- Ensure all existing models using tuple-based choices are refactored to use RichChoiceField
|
||||
- Validate choice groups are correctly loaded in registry during application startup
|
||||
- Update serializers to use RichChoiceSerializer for choice fields
|
||||
- Follow established patterns from rides, parks, and accounts domains for consistency
|
||||
90
.env.example
Normal file
90
.env.example
Normal file
@@ -0,0 +1,90 @@
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# ThrillWiki Environment Configuration
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Copy this file to ***REMOVED*** and fill in your actual values
|
||||
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Core Django Settings
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
SECRET_KEY=your-secret-key-here-generate-a-new-one
|
||||
DEBUG=True
|
||||
ALLOWED_HOSTS=localhost,127.0.0.1,beta.thrillwiki.com
|
||||
CSRF_TRUSTED_ORIGINS=https://beta.thrillwiki.com,http://localhost:8000
|
||||
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Database Configuration
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# PostgreSQL with PostGIS for production/development
|
||||
DATABASE_URL=postgis://username:password@localhost:5432/thrillwiki
|
||||
|
||||
# SQLite for quick local development (uncomment to use)
|
||||
# DATABASE_URL=spatialite:///path/to/your/db.sqlite3
|
||||
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Cache Configuration
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Local memory cache for development
|
||||
CACHE_URL=locmem://
|
||||
|
||||
# Redis for production (uncomment and configure for production)
|
||||
# CACHE_URL=redis://localhost:6379/1
|
||||
# REDIS_URL=redis://localhost:6379/0
|
||||
|
||||
CACHE_MIDDLEWARE_SECONDS=300
|
||||
CACHE_MIDDLEWARE_KEY_PREFIX=thrillwiki
|
||||
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Email Configuration
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
EMAIL_BACKEND=django.core.mail.backends.console.EmailBackend
|
||||
SERVER_EMAIL=django_webmaster@thrillwiki.com
|
||||
|
||||
# ForwardEmail configuration (uncomment to use)
|
||||
# EMAIL_BACKEND=email_service.backends.ForwardEmailBackend
|
||||
# FORWARD_EMAIL_BASE_URL=https://api.forwardemail.net
|
||||
|
||||
# SMTP configuration (uncomment to use)
|
||||
# EMAIL_URL=smtp://username:password@smtp.example.com:587
|
||||
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Security Settings
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Cloudflare Turnstile (get keys from Cloudflare dashboard)
|
||||
TURNSTILE_SITE_KEY=your-turnstile-site-key
|
||||
TURNSTILE_SECRET_KEY=your-turnstile-secret-key
|
||||
TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify
|
||||
|
||||
# Security headers (set to True for production)
|
||||
SECURE_SSL_REDIRECT=False
|
||||
SESSION_COOKIE_SECURE=False
|
||||
CSRF_COOKIE_SECURE=False
|
||||
SECURE_HSTS_SECONDS=31536000
|
||||
SECURE_HSTS_INCLUDE_SUBDOMAINS=True
|
||||
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# GeoDjango Settings (macOS with Homebrew)
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
GDAL_LIBRARY_PATH=/opt/homebrew/lib/libgdal.dylib
|
||||
GEOS_LIBRARY_PATH=/opt/homebrew/lib/libgeos_c.dylib
|
||||
|
||||
# Linux alternatives (uncomment if on Linux)
|
||||
# GDAL_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/libgdal.so
|
||||
# GEOS_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/libgeos_c.so
|
||||
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Optional: Third-party Integrations
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Sentry for error tracking (uncomment to use)
|
||||
# SENTRY_DSN=https://your-sentry-dsn-here
|
||||
|
||||
# Google Analytics (uncomment to use)
|
||||
# GOOGLE_ANALYTICS_ID=GA-XXXXXXXXX
|
||||
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Development/Debug Settings
|
||||
# [AWS-SECRET-REMOVED]===========================
|
||||
# Set to comma-separated list for debug toolbar
|
||||
# INTERNAL_IPS=127.0.0.1,::1
|
||||
|
||||
# Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
|
||||
LOG_LEVEL=INFO
|
||||
29
.flake8
Normal file
29
.flake8
Normal file
@@ -0,0 +1,29 @@
|
||||
[flake8]
|
||||
# Maximum line length (matches Black formatter)
|
||||
max-line-length = 88
|
||||
|
||||
# Exclude common directories that shouldn't be linted
|
||||
exclude =
|
||||
.git,
|
||||
__pycache__,
|
||||
.venv,
|
||||
venv,
|
||||
env,
|
||||
.env,
|
||||
migrations,
|
||||
node_modules,
|
||||
.tox,
|
||||
.mypy_cache,
|
||||
.pytest_cache,
|
||||
build,
|
||||
dist,
|
||||
*.egg-info
|
||||
|
||||
# Ignore line break style warnings which are style preferences
|
||||
# W503: line break before binary operator (conflicts with PEP8 W504)
|
||||
# W504: line break after binary operator (conflicts with PEP8 W503)
|
||||
# These warnings contradict each other, so it's best to ignore one or both
|
||||
ignore = W503,W504
|
||||
|
||||
# Maximum complexity for McCabe complexity checker
|
||||
max-complexity = 10
|
||||
22
.github/dependabot.yml
vendored
Normal file
22
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
version: 2
|
||||
updates:
|
||||
# Python dependencies
|
||||
- package-ecosystem: "pip"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
commit-message:
|
||||
prefix: "[DEPENDABOT] Update"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- "dependencies"
|
||||
reviewers:
|
||||
- "pacnpal"
|
||||
|
||||
# GitHub Actions dependencies
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
commit-message:
|
||||
prefix: "[DEPENDABOT] Update Actions"
|
||||
54
.github/workflows/claude-code-review.yml
vendored
Normal file
54
.github/workflows/claude-code-review.yml
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
name: Claude Code Review
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize]
|
||||
# Optional: Only run on specific file changes
|
||||
# paths:
|
||||
# - "src/**/*.ts"
|
||||
# - "src/**/*.tsx"
|
||||
# - "src/**/*.js"
|
||||
# - "src/**/*.jsx"
|
||||
|
||||
jobs:
|
||||
claude-review:
|
||||
# Optional: Filter by PR author
|
||||
# if: |
|
||||
# github.event.pull_request.user.login == 'external-contributor' ||
|
||||
# github.event.pull_request.user.login == 'new-developer' ||
|
||||
# github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR'
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
issues: read
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Run Claude Code Review
|
||||
id: claude-review
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
prompt: |
|
||||
Please review this pull request and provide feedback on:
|
||||
- Code quality and best practices
|
||||
- Potential bugs or issues
|
||||
- Performance considerations
|
||||
- Security concerns
|
||||
- Test coverage
|
||||
|
||||
Use the repository's CLAUDE.md for guidance on style and conventions. Be constructive and helpful in your feedback.
|
||||
|
||||
Use `gh pr comment` with your Bash tool to leave your review as a comment on the PR.
|
||||
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://docs.anthropic.com/en/docs/claude-code/sdk#command-line for available options
|
||||
claude_args: '--allowed-tools "Bash(gh issue view:*),Bash(gh search:*),Bash(gh issue list:*),Bash(gh pr comment:*),Bash(gh pr diff:*),Bash(gh pr view:*),Bash(gh pr list:*)"'
|
||||
|
||||
50
.github/workflows/claude.yml
vendored
Normal file
50
.github/workflows/claude.yml
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
name: Claude Code
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
pull_request_review_comment:
|
||||
types: [created]
|
||||
issues:
|
||||
types: [opened, assigned]
|
||||
pull_request_review:
|
||||
types: [submitted]
|
||||
|
||||
jobs:
|
||||
claude:
|
||||
if: |
|
||||
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
|
||||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
|
||||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
issues: read
|
||||
id-token: write
|
||||
actions: read # Required for Claude to read CI results on PRs
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Run Claude Code
|
||||
id: claude
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
|
||||
|
||||
# This is an optional setting that allows Claude to read CI results on PRs
|
||||
additional_permissions: |
|
||||
actions: read
|
||||
|
||||
# Optional: Give a custom prompt to Claude. If this is not specified, Claude will perform the instructions specified in the comment that tagged it.
|
||||
# prompt: 'Update the pull request description to include a summary of changes.'
|
||||
|
||||
# Optional: Add claude_args to customize behavior and configuration
|
||||
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
|
||||
# or https://docs.anthropic.com/en/docs/claude-code/sdk#command-line for available options
|
||||
# claude_args: '--model claude-opus-4-1-20250805 --allowed-tools Bash(gh pr:*)'
|
||||
|
||||
41
.github/workflows/django.yml
vendored
Normal file
41
.github/workflows/django.yml
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
name: Django CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest, macos-latest]
|
||||
python-version: [3.13.1]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Homebrew on Linux
|
||||
if: runner.os == 'Linux'
|
||||
run: |
|
||||
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
|
||||
echo "/home/linuxbrew/.linuxbrew/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Install GDAL with Homebrew
|
||||
run: brew install gdal
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v6
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
|
||||
- name: Run Tests
|
||||
run: |
|
||||
python manage.py test
|
||||
34
.github/workflows/review.yml
vendored
Normal file
34
.github/workflows/review.yml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
name: Claude Code Review
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
on:
|
||||
# Run on new/updated PRs
|
||||
pull_request:
|
||||
types: [opened, reopened, synchronize]
|
||||
|
||||
# Allow manual triggers for existing PRs
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_number:
|
||||
description: 'Pull Request Number'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
code-review:
|
||||
runs-on: ubuntu-latest
|
||||
environment: development_environment
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Run Claude Review
|
||||
uses: pacnpal/claude-code-review@main
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
anthropic-key: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
pr-number: ${{ github.event.pull_request.number || inputs.pr_number }}
|
||||
156
.gitignore
vendored
156
.gitignore
vendored
@@ -1,34 +1,124 @@
|
||||
/.vscode
|
||||
/dev.sh
|
||||
/flake.nix
|
||||
venv
|
||||
/venv
|
||||
./venv
|
||||
venv/sour
|
||||
# Python
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
*.so
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# Django
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
/backend/staticfiles/
|
||||
/backend/media/
|
||||
|
||||
# UV
|
||||
.uv/
|
||||
backend/.uv/
|
||||
|
||||
# Node.js
|
||||
node_modules/
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
lerna-debug.log*
|
||||
.pnpm-store/
|
||||
|
||||
# Vue.js / Vite
|
||||
/frontend/dist/
|
||||
/frontend/dist-ssr/
|
||||
*.local
|
||||
|
||||
# Environment variables
|
||||
.env
|
||||
.env.local
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
backend/.env
|
||||
frontend/.env
|
||||
|
||||
# IDEs
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
*.sublime-project
|
||||
*.sublime-workspace
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
.DS_Store
|
||||
.DS_Store
|
||||
accounts/__pycache__/
|
||||
__pycache__
|
||||
thrillwiki/__pycache__
|
||||
reviews/__pycache__
|
||||
parks/__pycache__
|
||||
media/__pycache__
|
||||
email_service/__pycache__
|
||||
core/__pycache__
|
||||
companies/__pycache__
|
||||
accounts/__pycache__
|
||||
venv
|
||||
accounts/__pycache__
|
||||
thrillwiki/__pycache__/settings.cpython-311.pyc
|
||||
accounts/migrations/__pycache__/__init__.cpython-311.pyc
|
||||
accounts/migrations/__pycache__/0001_initial.cpython-311.pyc
|
||||
companies/migrations/__pycache__
|
||||
moderation/__pycache__
|
||||
rides/__pycache__
|
||||
ssh_tools.jsonc
|
||||
thrillwiki/__pycache__/settings.cpython-312.pyc
|
||||
parks/__pycache__/views.cpython-312.pyc
|
||||
.venv/lib/python3.12/site-packages
|
||||
thrillwiki/__pycache__/urls.cpython-312.pyc
|
||||
thrillwiki/__pycache__/views.cpython-312.pyc
|
||||
Thumbs.db
|
||||
Desktop.ini
|
||||
|
||||
# Logs
|
||||
logs/
|
||||
*.log
|
||||
|
||||
# Coverage
|
||||
coverage/
|
||||
*.lcov
|
||||
.nyc_output
|
||||
htmlcov/
|
||||
.coverage
|
||||
.coverage.*
|
||||
|
||||
# Testing
|
||||
.pytest_cache/
|
||||
.cache
|
||||
|
||||
# Temporary files
|
||||
tmp/
|
||||
temp/
|
||||
*.tmp
|
||||
*.temp
|
||||
|
||||
# Build outputs
|
||||
/dist/
|
||||
/build/
|
||||
|
||||
# Backup files
|
||||
*.bak
|
||||
*.orig
|
||||
*.swp
|
||||
|
||||
# Archive files
|
||||
*.tar.gz
|
||||
*.zip
|
||||
*.rar
|
||||
|
||||
# Security
|
||||
*.pem
|
||||
*.key
|
||||
*.cert
|
||||
|
||||
# Local development
|
||||
/uploads/
|
||||
/backups/
|
||||
.django_tailwind_cli/
|
||||
backend/.env
|
||||
frontend/.env
|
||||
|
||||
# Extracted packages
|
||||
django-forwardemail/
|
||||
frontend/
|
||||
frontend
|
||||
77
.replit
Normal file
77
.replit
Normal file
@@ -0,0 +1,77 @@
|
||||
modules = ["bash", "web", "nodejs-20", "python-3.13", "postgresql-16"]
|
||||
|
||||
[nix]
|
||||
channel = "stable-25_05"
|
||||
packages = [
|
||||
"freetype",
|
||||
"gdal",
|
||||
"geos",
|
||||
"gitFull",
|
||||
"lcms2",
|
||||
"libimagequant",
|
||||
"libjpeg",
|
||||
"libtiff",
|
||||
"libwebp",
|
||||
"libxcrypt",
|
||||
"openjpeg",
|
||||
"playwright-driver",
|
||||
"postgresql",
|
||||
"proj",
|
||||
"tcl",
|
||||
"tk",
|
||||
"uv",
|
||||
"zlib",
|
||||
]
|
||||
|
||||
[agent]
|
||||
expertMode = true
|
||||
|
||||
[workflows]
|
||||
runButton = "Project"
|
||||
|
||||
[[workflows.workflow]]
|
||||
name = "Project"
|
||||
mode = "parallel"
|
||||
author = "agent"
|
||||
|
||||
[[workflows.workflow.tasks]]
|
||||
task = "workflow.run"
|
||||
args = "ThrillWiki Server"
|
||||
|
||||
[[workflows.workflow]]
|
||||
name = "ThrillWiki Server"
|
||||
author = "agent"
|
||||
|
||||
[[workflows.workflow.tasks]]
|
||||
task = "shell.exec"
|
||||
args = "/home/runner/workspace/.venv/bin/python manage.py tailwind runserver 0.0.0.0:5000"
|
||||
waitForPort = 5000
|
||||
|
||||
[workflows.workflow.metadata]
|
||||
outputType = "webview"
|
||||
|
||||
[[ports]]
|
||||
localPort = 5000
|
||||
externalPort = 80
|
||||
|
||||
[[ports]]
|
||||
localPort = 41923
|
||||
externalPort = 3000
|
||||
|
||||
[[ports]]
|
||||
localPort = 45245
|
||||
externalPort = 3001
|
||||
|
||||
[[ports]]
|
||||
localPort = 45563
|
||||
externalPort = 3002
|
||||
|
||||
[deployment]
|
||||
deploymentTarget = "autoscale"
|
||||
run = [
|
||||
"gunicorn",
|
||||
"--bind=0.0.0.0:5000",
|
||||
"--reuse-port",
|
||||
"thrillwiki.wsgi:application",
|
||||
]
|
||||
build = ["uv", "pip", "install", "--system", "-r", "requirements.txt"]
|
||||
18
.roo/mcp.json
Normal file
18
.roo/mcp.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"context7": {
|
||||
"command": "npx",
|
||||
"args": [
|
||||
"-y",
|
||||
"@upstash/context7-mcp"
|
||||
],
|
||||
"env": {
|
||||
"DEFAULT_MINIMUM_TOKENS": ""
|
||||
},
|
||||
"alwaysAllow": [
|
||||
"resolve-library-id",
|
||||
"get-library-docs"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
2
.roo/rules/api_architecture_enforcement
Normal file
2
.roo/rules/api_architecture_enforcement
Normal file
@@ -0,0 +1,2 @@
|
||||
## CRITICAL: Centralized API Structure
|
||||
All API endpoints MUST be centralized under the `backend/apps/api/v1/` structure. This is NON-NEGOTIABLE.
|
||||
49
.roo/rules/critical_rules
Normal file
49
.roo/rules/critical_rules
Normal file
@@ -0,0 +1,49 @@
|
||||
# Project Startup & Development Rules
|
||||
|
||||
## Server & Package Management
|
||||
- **Starting the Dev Server:** Always assume the server is running and changes have taken effect. If issues arise, run:
|
||||
```bash
|
||||
$PROJECT_ROOT/shared/scripts/start-servers.sh
|
||||
```
|
||||
- **Python Packages:** Only use UV to add packages:
|
||||
```bash
|
||||
cd $PROJECT_ROOT/backend && uv add <package>
|
||||
```
|
||||
- **Django Commands:** Always use `cd backend && uv run manage.py <command>` for all management tasks (migrations, shell, superuser, etc.). Never use `python manage.py` or `uv run python manage.py`.
|
||||
|
||||
## CRITICAL Frontend design rules
|
||||
- EVERYTHING must support both dark and light mode.
|
||||
- Make sure the light/dark mode toggle works with the Vue components and pages.
|
||||
- Leverage Tailwind CSS 4 and Shadcn UI components.
|
||||
|
||||
## Frontend API URL Rules
|
||||
- **Vite Proxy:** Always check `frontend/vite.config.ts` for proxy rules before changing frontend API URLs.
|
||||
- **URL Flow:** Understand how frontend URLs are rewritten by Vite proxy (e.g., `/api/auth/login/` → `/api/v1/auth/login/`).
|
||||
- **Verification:** Confirm proxy behavior via config and browser network tab. Only change URLs if proxy is NOT handling rewriting.
|
||||
- **Common Mistake:** Don’t assume frontend URLs are wrong due to proxy configuration.
|
||||
|
||||
## Entity Relationship Patterns
|
||||
- **Park:** Must have Operator (required), may have PropertyOwner (optional), cannot reference Company directly.
|
||||
- **Ride:** Must belong to Park, may have Manufacturer/Designer (optional), cannot reference Company directly.
|
||||
- **Entities:**
|
||||
- Operators: Operate parks.
|
||||
- PropertyOwners: Own park property (optional).
|
||||
- Manufacturers: Make rides.
|
||||
- Designers: Design rides.
|
||||
- All entities can have locations.
|
||||
- **Constraints:** Operator and PropertyOwner can be same or different. Manufacturers and Designers are distinct. Use proper foreign keys with correct null/blank settings.
|
||||
|
||||
## General Best Practices
|
||||
- Never assume blank output means success—always verify changes by testing.
|
||||
- Use context7 for documentation when troubleshooting.
|
||||
- Document changes with conport and reasoning.
|
||||
- Include relevant context and information in all changes.
|
||||
- Test and validate code before deployment.
|
||||
- Communicate changes clearly with your team.
|
||||
- Be open to feedback and continuous improvement.
|
||||
- Prioritize readability, maintainability, security, performance, scalability, and modularity.
|
||||
- Use meaningful names, DRY principles, clear comments, and handle errors gracefully.
|
||||
- Log important events/errors for troubleshooting.
|
||||
- Prefer existing modules/packages over new code.
|
||||
- Keep documentation up to date.
|
||||
- Consider security vulnerabilities and performance bottlenecks in all changes.
|
||||
390
.roo/rules/roo_code_conport_strategy
Normal file
390
.roo/rules/roo_code_conport_strategy
Normal file
@@ -0,0 +1,390 @@
|
||||
# --- ConPort Memory Strategy ---
|
||||
conport_memory_strategy:
|
||||
# CRITICAL: At the beginning of every session, the agent MUST execute the 'initialization' sequence
|
||||
# to determine the ConPort status and load relevant context.
|
||||
workspace_id_source: "The agent must obtain the absolute path to the current workspace to use as `workspace_id` for all ConPort tool calls. This might be available as `${workspaceFolder}` or require asking the user."
|
||||
|
||||
initialization:
|
||||
thinking_preamble: |
|
||||
|
||||
agent_action_plan:
|
||||
- step: 1
|
||||
action: "Determine `ACTUAL_WORKSPACE_ID`."
|
||||
- step: 2
|
||||
action: "Invoke `list_files` for `ACTUAL_WORKSPACE_ID + \"/context_portal/\"`."
|
||||
tool_to_use: "list_files"
|
||||
parameters: "path: ACTUAL_WORKSPACE_ID + \"/context_portal/\""
|
||||
- step: 3
|
||||
action: "Analyze result and branch based on 'context.db' existence."
|
||||
conditions:
|
||||
- if: "'context.db' is found"
|
||||
then_sequence: "load_existing_conport_context"
|
||||
- else: "'context.db' NOT found"
|
||||
then_sequence: "handle_new_conport_setup"
|
||||
|
||||
load_existing_conport_context:
|
||||
thinking_preamble: |
|
||||
|
||||
agent_action_plan:
|
||||
- step: 1
|
||||
description: "Attempt to load initial contexts from ConPort."
|
||||
actions:
|
||||
- "Invoke `get_product_context`... Store result."
|
||||
- "Invoke `get_active_context`... Store result."
|
||||
- "Invoke `get_decisions` (limit 5 for a better overview)... Store result."
|
||||
- "Invoke `get_progress` (limit 5)... Store result."
|
||||
- "Invoke `get_system_patterns` (limit 5)... Store result."
|
||||
- "Invoke `get_custom_data` (category: \"critical_settings\")... Store result."
|
||||
- "Invoke `get_custom_data` (category: \"ProjectGlossary\")... Store result."
|
||||
- "Invoke `get_recent_activity_summary` (default params, e.g., last 24h, limit 3 per type) for a quick catch-up. Store result."
|
||||
- step: 2
|
||||
description: "Analyze loaded context."
|
||||
conditions:
|
||||
- if: "results from step 1 are NOT empty/minimal"
|
||||
actions:
|
||||
- "Set internal status to [CONPORT_ACTIVE]."
|
||||
- "Inform user: \"ConPort memory initialized. Existing contexts and recent activity loaded.\""
|
||||
- "Use `ask_followup_question` with suggestions like \"Review recent activity?\", \"Continue previous task?\", \"What would you like to work on?\"."
|
||||
- else: "loaded context is empty/minimal despite DB file existing"
|
||||
actions:
|
||||
- "Set internal status to [CONPORT_ACTIVE]."
|
||||
- "Inform user: \"ConPort database file found, but it appears to be empty or minimally initialized. You can start by defining Product/Active Context or logging project information.\""
|
||||
- "Use `ask_followup_question` with suggestions like \"Define Product Context?\", \"Log a new decision?\"."
|
||||
- step: 3
|
||||
description: "Handle Load Failure (if step 1's `get_*` calls failed)."
|
||||
condition: "If any `get_*` calls in step 1 failed unexpectedly"
|
||||
action: "Fall back to `if_conport_unavailable_or_init_failed`."
|
||||
|
||||
handle_new_conport_setup:
|
||||
thinking_preamble: |
|
||||
|
||||
agent_action_plan:
|
||||
- step: 1
|
||||
action: "Inform user: \"No existing ConPort database found at `ACTUAL_WORKSPACE_ID + \"/context_portal/context.db\"`.\""
|
||||
- step: 2
|
||||
action: "Use `ask_followup_question`."
|
||||
tool_to_use: "ask_followup_question"
|
||||
parameters:
|
||||
question: "Would you like to initialize a new ConPort database for this workspace? The database will be created automatically when ConPort tools are first used."
|
||||
suggestions:
|
||||
- "Yes, initialize a new ConPort database."
|
||||
- "No, do not use ConPort for this session."
|
||||
- step: 3
|
||||
description: "Process user response."
|
||||
conditions:
|
||||
- if_user_response_is: "Yes, initialize a new ConPort database."
|
||||
actions:
|
||||
- "Inform user: \"Okay, a new ConPort database will be created.\""
|
||||
- description: "Attempt to bootstrap Product Context from projectBrief.md (this happens only on new setup)."
|
||||
thinking_preamble: |
|
||||
|
||||
sub_steps:
|
||||
- "Invoke `list_files` with `path: ACTUAL_WORKSPACE_ID` (non-recursive, just to check root)."
|
||||
- description: "Analyze `list_files` result for 'projectBrief.md'."
|
||||
conditions:
|
||||
- if: "'projectBrief.md' is found in the listing"
|
||||
actions:
|
||||
- "Invoke `read_file` for `ACTUAL_WORKSPACE_ID + \"/projectBrief.md\"`."
|
||||
- action: "Use `ask_followup_question`."
|
||||
tool_to_use: "ask_followup_question"
|
||||
parameters:
|
||||
question: "Found projectBrief.md in your workspace. As we're setting up ConPort for the first time, would you like to import its content into the Product Context?"
|
||||
suggestions:
|
||||
- "Yes, import its content now."
|
||||
- "No, skip importing it for now."
|
||||
- description: "Process user response to import projectBrief.md."
|
||||
conditions:
|
||||
- if_user_response_is: "Yes, import its content now."
|
||||
actions:
|
||||
- "(No need to `get_product_context` as DB is new and empty)"
|
||||
- "Prepare `content` for `update_product_context`. For example: `{\"initial_product_brief\": \"[content from projectBrief.md]\"}`."
|
||||
- "Invoke `update_product_context` with the prepared content."
|
||||
- "Inform user of the import result (success or failure)."
|
||||
- else: "'projectBrief.md' NOT found"
|
||||
actions:
|
||||
- action: "Use `ask_followup_question`."
|
||||
tool_to_use: "ask_followup_question"
|
||||
parameters:
|
||||
question: "`projectBrief.md` was not found in the workspace root. Would you like to define the initial Product Context manually now?"
|
||||
suggestions:
|
||||
- "Define Product Context manually."
|
||||
- "Skip for now."
|
||||
- "(If \"Define manually\", guide user through `update_product_context`)."
|
||||
- "Proceed to 'load_existing_conport_context' sequence (which will now load the potentially bootstrapped product context and other empty contexts)."
|
||||
- if_user_response_is: "No, do not use ConPort for this session."
|
||||
action: "Proceed to `if_conport_unavailable_or_init_failed` (with a message indicating user chose not to initialize)."
|
||||
|
||||
if_conport_unavailable_or_init_failed:
|
||||
thinking_preamble: |
|
||||
|
||||
agent_action: "Inform user: \"ConPort memory will not be used for this session. Status: [CONPORT_INACTIVE].\""
|
||||
|
||||
general:
|
||||
status_prefix: "Begin EVERY response with either '[CONPORT_ACTIVE]' or '[CONPORT_INACTIVE]'."
|
||||
proactive_logging_cue: "Remember to proactively identify opportunities to log or update ConPort based on the conversation (e.g., if user outlines a new plan, consider logging decisions or progress). Confirm with the user before logging."
|
||||
proactive_error_handling: "When encountering errors (e.g., tool failures, unexpected output), proactively log the error details using `log_custom_data` (category: 'ErrorLogs', key: 'timestamp_error_summary') and consider updating `active_context` with `open_issues` if it's a persistent problem. Prioritize using ConPort's `get_item_history` or `get_recent_activity_summary` to diagnose issues if they relate to past context changes."
|
||||
semantic_search_emphasis: "For complex or nuanced queries, especially when direct keyword search (`search_decisions_fts`, `search_custom_data_value_fts`) might be insufficient, prioritize using `semantic_search_conport` to leverage conceptual understanding and retrieve more relevant context. Explain to the user why semantic search is being used."
|
||||
|
||||
conport_updates:
|
||||
frequency: "UPDATE CONPORT THROUGHOUT THE CHAT SESSION, WHEN SIGNIFICANT CHANGES OCCUR, OR WHEN EXPLICITLY REQUESTED."
|
||||
workspace_id_note: "All ConPort tool calls require the `workspace_id`."
|
||||
tools:
|
||||
- name: get_product_context
|
||||
trigger: "To understand the overall project goals, features, or architecture at any time."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `get_product_context` (`{"workspace_id": "..."}`). Result is a direct dictionary.
|
||||
- name: update_product_context
|
||||
trigger: "When the high-level project description, goals, features, or overall architecture changes significantly, as confirmed by the user."
|
||||
action_description: |
|
||||
<thinking>
|
||||
- Product context needs updating.
|
||||
- Step 1: (Optional but recommended if unsure of current state) Invoke `get_product_context`.
|
||||
- Step 2: Prepare the `content` (for full overwrite) or `patch_content` (partial update) dictionary.
|
||||
- To remove a key using `patch_content`, set its value to the special string sentinel `\"__DELETE__\"`.
|
||||
- Confirm changes with the user.
|
||||
</thinking>
|
||||
# Agent Action: Invoke `update_product_context` (`{"workspace_id": "...", "content": {...}}` or `{"workspace_id": "...", "patch_content": {"key_to_update": "new_value", "key_to_delete": "__DELETE__"}}`).
|
||||
- name: get_active_context
|
||||
trigger: "To understand the current task focus, immediate goals, or session-specific context."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `get_active_context` (`{"workspace_id": "..."}`). Result is a direct dictionary.
|
||||
- name: update_active_context
|
||||
trigger: "When the current focus of work changes, new questions arise, or session-specific context needs updating (e.g., `current_focus`, `open_issues`), as confirmed by the user."
|
||||
action_description: |
|
||||
<thinking>
|
||||
- Active context needs updating.
|
||||
- Step 1: (Optional) Invoke `get_active_context` to retrieve the current state.
|
||||
- Step 2: Prepare `content` (for full overwrite) or `patch_content` (for partial update).
|
||||
- Common fields to update include `current_focus`, `open_issues`, and other session-specific data.
|
||||
- To remove a key using `patch_content`, set its value to the special string sentinel `\"__DELETE__\"`.
|
||||
- Confirm changes with the user.
|
||||
</thinking>
|
||||
# Agent Action: Invoke `update_active_context` (`{"workspace_id": "...", "content": {...}}` or `{"workspace_id": "...", "patch_content": {"current_focus": "new_focus", "open_issues": ["issue1", "issue2"], "key_to_delete": "__DELETE__"}}`).
|
||||
- name: log_decision
|
||||
trigger: "When a significant architectural or implementation decision is made and confirmed by the user."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `log_decision` (`{"workspace_id": "...", "summary": "...", "rationale": "...", "tags": ["optional_tag"]}}`).
|
||||
- name: get_decisions
|
||||
trigger: "To retrieve a list of past decisions, e.g., to review history or find a specific decision."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `get_decisions` (`{"workspace_id": "...", "limit": N, "tags_filter_include_all": ["tag1"], "tags_filter_include_any": ["tag2"]}}`). Explain optional filters.
|
||||
- name: search_decisions_fts
|
||||
trigger: "When searching for decisions by keywords in summary, rationale, details, or tags, and basic `get_decisions` is insufficient."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `search_decisions_fts` (`{"workspace_id": "...", "query_term": "search keywords", "limit": N}}`).
|
||||
- name: delete_decision_by_id
|
||||
trigger: "When user explicitly confirms deletion of a specific decision by its ID."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `delete_decision_by_id` (`{"workspace_id": "...", "decision_id": ID}}`). Emphasize prior confirmation.
|
||||
- name: log_progress
|
||||
trigger: "When a task begins, its status changes (e.g., TODO, IN_PROGRESS, DONE), or it's completed. Also when a new sub-task is defined."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `log_progress` (`{"workspace_id": "...", "description": "...", "status": "...", "linked_item_type": "...", "linked_item_id": "..."}}`). Note: 'summary' was changed to 'description' for log_progress.
|
||||
- name: get_progress
|
||||
trigger: "To review current task statuses, find pending tasks, or check history of progress."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `get_progress` (`{"workspace_id": "...", "status_filter": "...", "parent_id_filter": ID, "limit": N}}`).
|
||||
- name: update_progress
|
||||
trigger: "Updates an existing progress entry."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `update_progress` (`{"workspace_id": "...", "progress_id": ID, "status": "...", "description": "...", "parent_id": ID}}`).
|
||||
- name: delete_progress_by_id
|
||||
trigger: "Deletes a progress entry by its ID."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `delete_progress_by_id` (`{"workspace_id": "...", "progress_id": ID}}`).
|
||||
- name: log_system_pattern
|
||||
trigger: "When new architectural patterns are introduced, or existing ones are modified, as confirmed by the user."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `log_system_pattern` (`{"workspace_id": "...", "name": "...", "description": "...", "tags": ["optional_tag"]}}`).
|
||||
- name: get_system_patterns
|
||||
trigger: "To retrieve a list of defined system patterns."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `get_system_patterns` (`{"workspace_id": "...", "tags_filter_include_all": ["tag1"], "limit": N}}`). Note: limit was not in original example, added for consistency.
|
||||
- name: delete_system_pattern_by_id
|
||||
trigger: "When user explicitly confirms deletion of a specific system pattern by its ID."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `delete_system_pattern_by_id` (`{"workspace_id": "...", "pattern_id": ID}}`). Emphasize prior confirmation.
|
||||
- name: log_custom_data
|
||||
trigger: "To store any other type of structured or unstructured project-related information not covered by other tools (e.g., glossary terms, technical specs, meeting notes), as confirmed by the user."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `log_custom_data` (`{"workspace_id": "...", "category": "...", "key": "...", "value": {... or "string"}}`). Note: 'metadata' field is not part of log_custom_data args.
|
||||
- name: get_custom_data
|
||||
trigger: "To retrieve specific custom data by category and key."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `get_custom_data` (`{"workspace_id": "...", "category": "...", "key": "..."}}`).
|
||||
- name: delete_custom_data
|
||||
trigger: "When user explicitly confirms deletion of specific custom data by category and key."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `delete_custom_data` (`{"workspace_id": "...", "category": "...", "key": "..."}}`). Emphasize prior confirmation.
|
||||
- name: search_custom_data_value_fts
|
||||
trigger: "When searching for specific terms within any custom data values, categories, or keys."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `search_custom_data_value_fts` (`{"workspace_id": "...", "query_term": "...", "category_filter": "...", "limit": N}}`).
|
||||
- name: search_project_glossary_fts
|
||||
trigger: "When specifically searching for terms within the 'ProjectGlossary' custom data category."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `search_project_glossary_fts` (`{"workspace_id": "...", "query_term": "...", "limit": N}}`).
|
||||
- name: semantic_search_conport
|
||||
trigger: "When a natural language query requires conceptual understanding beyond keyword matching, or when direct keyword searches are insufficient."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `semantic_search_conport` (`{"workspace_id": "...", "query_text": "...", "top_k": N, "filter_item_types": ["decision", "custom_data"]}}`). Explain filters.
|
||||
- name: link_conport_items
|
||||
trigger: "When a meaningful relationship is identified and confirmed between two existing ConPort items (e.g., a decision is implemented by a system pattern, a progress item tracks a decision)."
|
||||
action_description: |
|
||||
<thinking>
|
||||
- Need to link two items. Identify source type/ID, target type/ID, and relationship.
|
||||
- Common relationship_types: 'implements', 'related_to', 'tracks', 'blocks', 'clarifies', 'depends_on'. Propose a suitable one or ask user.
|
||||
</thinking>
|
||||
# Agent Action: Invoke `link_conport_items` (`{"workspace_id":"...", "source_item_type":"...", "source_item_id":"...", "target_item_type":"...", "target_item_id":"...", "relationship_type":"...", "description":"Optional notes"}`).
|
||||
- name: get_linked_items
|
||||
trigger: "To understand the relationships of a specific ConPort item, or to explore the knowledge graph around an item."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `get_linked_items` (`{"workspace_id":"...", "item_type":"...", "item_id":"...", "relationship_type_filter":"...", "linked_item_type_filter":"...", "limit":N}`).
|
||||
- name: get_item_history
|
||||
trigger: "When needing to review past versions of Product Context or Active Context, or to see when specific changes were made."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `get_item_history` (`{"workspace_id":"...", "item_type":"product_context" or "active_context", "limit":N, "version":V, "before_timestamp":"ISO_DATETIME", "after_timestamp":"ISO_DATETIME"}`).
|
||||
- name: batch_log_items
|
||||
trigger: "When the user provides a list of multiple items of the SAME type (e.g., several decisions, multiple new glossary terms) to be logged at once."
|
||||
action_description: |
|
||||
<thinking>
|
||||
- User provided multiple items. Verify they are of the same loggable type.
|
||||
- Construct the `items` list, where each element is a dictionary of arguments for the single-item log tool (e.g., for `log_decision`).
|
||||
</thinking>
|
||||
# Agent Action: Invoke `batch_log_items` (`{"workspace_id":"...", "item_type":"decision", "items": [{"summary":"...", "rationale":"..."}, {"summary":"..."}] }`).
|
||||
- name: get_recent_activity_summary
|
||||
trigger: "At the start of a new session to catch up, or when the user asks for a summary of recent project activities."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `get_recent_activity_summary` (`{"workspace_id":"...", "hours_ago":H, "since_timestamp":"ISO_DATETIME", "limit_per_type":N}`). Explain default if no time args.
|
||||
- name: get_conport_schema
|
||||
trigger: "If there's uncertainty about available ConPort tools or their arguments during a session (internal LLM check), or if an advanced user specifically asks for the server's tool schema."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `get_conport_schema` (`{"workspace_id":"..."}`). Primarily for internal LLM reference or direct user request.
|
||||
- name: export_conport_to_markdown
|
||||
trigger: "When the user requests to export the current ConPort data to markdown files (e.g., for backup, sharing, or version control)."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `export_conport_to_markdown` (`{"workspace_id":"...", "output_path":"optional/relative/path"}`). Explain default output path if not provided.
|
||||
- name: import_markdown_to_conport
|
||||
trigger: "When the user requests to import ConPort data from a directory of markdown files previously exported by this system."
|
||||
action_description: |
|
||||
# Agent Action: Invoke `import_markdown_to_conport` (`{"workspace_id":"...", "input_path":"optional/relative/path"}`). Explain default input path. Warn about potential overwrites or merges if data already exists.
|
||||
- name: reconfigure_core_guidance
|
||||
type: guidance
|
||||
product_active_context: "The internal JSON structure of 'Product Context' and 'Active Context' (the `content` field) is flexible. Work with the user to define and evolve this structure via `update_product_context` and `update_active_context`. The server stores this `content` as a JSON blob."
|
||||
decisions_progress_patterns: "The fundamental fields for Decisions, Progress, and System Patterns are fixed by ConPort's tools. For significantly different structures or additional fields, guide the user to create a new custom context category using `log_custom_data` (e.g., category: 'project_milestones_detailed')."
|
||||
|
||||
conport_sync_routine:
|
||||
trigger: "^(Sync ConPort|ConPort Sync)$"
|
||||
user_acknowledgement_text: "[CONPORT_SYNCING]"
|
||||
instructions:
|
||||
- "Halt Current Task: Stop current activity."
|
||||
- "Acknowledge Command: Send `[CONPORT_SYNCING]` to the user."
|
||||
- "Review Chat History: Analyze the complete current chat session for new information, decisions, progress, context changes, clarifications, and potential new relationships between items."
|
||||
core_update_process:
|
||||
thinking_preamble: |
|
||||
- Synchronize ConPort with information from the current chat session.
|
||||
- Use appropriate ConPort tools based on identified changes.
|
||||
- For `update_product_context` and `update_active_context`, first fetch current content, then merge/update (potentially using `patch_content`), then call the update tool with the *complete new content object* or the patch.
|
||||
- All tool calls require the `workspace_id`.
|
||||
agent_action_plan_illustrative:
|
||||
- "Log new decisions (use `log_decision`)."
|
||||
- "Log task progress/status changes (use `log_progress`)."
|
||||
- "Update existing progress entries (use `update_progress`)."
|
||||
- "Delete progress entries (use `delete_progress_by_id`)."
|
||||
- "Log new system patterns (use `log_system_pattern`)."
|
||||
- "Update Active Context (use `get_active_context` then `update_active_context` with full or patch)."
|
||||
- "Update Product Context if significant changes (use `get_product_context` then `update_product_context` with full or patch)."
|
||||
- "Log new custom context, including ProjectGlossary terms (use `log_custom_data`)."
|
||||
- "Identify and log new relationships between items (use `link_conport_items`)."
|
||||
- "If many items of the same type were discussed, consider `batch_log_items`."
|
||||
- "After updates, consider a brief `get_recent_activity_summary` to confirm and refresh understanding."
|
||||
post_sync_actions:
|
||||
- "Inform user: ConPort synchronized with session info."
|
||||
- "Resume previous task or await new instructions."
|
||||
|
||||
dynamic_context_retrieval_for_rag:
|
||||
description: |
|
||||
Guidance for dynamically retrieving and assembling context from ConPort to answer user queries or perform tasks,
|
||||
enhancing Retrieval Augmented Generation (RAG) capabilities.
|
||||
trigger: "When the AI needs to answer a specific question, perform a task requiring detailed project knowledge, or generate content based on ConPort data."
|
||||
goal: "To construct a concise, highly relevant context set for the LLM, improving the accuracy and relevance of its responses."
|
||||
steps:
|
||||
- step: 1
|
||||
action: "Analyze User Query/Task"
|
||||
details: "Deconstruct the user's request to identify key entities, concepts, keywords, and the specific type of information needed from ConPort."
|
||||
- step: 2
|
||||
action: "Prioritized Retrieval Strategy"
|
||||
details: |
|
||||
Based on the analysis, select the most appropriate ConPort tools:
|
||||
- **Targeted FTS:** Use `search_decisions_fts`, `search_custom_data_value_fts`, `search_project_glossary_fts` for keyword-based searches if specific terms are evident.
|
||||
- **Specific Item Retrieval:** Use `get_custom_data` (if category/key known), `get_decisions` (by ID or for recent items), `get_system_patterns`, `get_progress` if the query points to specific item types or IDs.
|
||||
- **(Future):** Prioritize semantic search tools once available for conceptual queries.
|
||||
- **Broad Context (Fallback):** Use `get_product_context` or `get_active_context` as a fallback if targeted retrieval yields little, but be mindful of their size.
|
||||
- step: 3
|
||||
action: "Retrieve Initial Set"
|
||||
details: "Execute the chosen tool(s) to retrieve an initial, small set (e.g., top 3-5) of the most relevant items or data snippets."
|
||||
- step: 4
|
||||
action: "Contextual Expansion (Optional)"
|
||||
details: "For the most promising items from Step 3, consider using `get_linked_items` to fetch directly related items (1-hop). This can provide crucial context or disambiguation. Use judiciously to avoid excessive data."
|
||||
- step: 5
|
||||
action: "Synthesize and Filter"
|
||||
details: |
|
||||
Review the retrieved information (initial set + expanded context).
|
||||
- **Filter:** Discard irrelevant items or parts of items.
|
||||
- **Synthesize/Summarize:** If multiple relevant pieces of information are found, synthesize them into a concise summary that directly addresses the query/task. Extract only the most pertinent sentences or facts.
|
||||
- step: 6
|
||||
action: "Assemble Prompt Context"
|
||||
details: |
|
||||
Construct the context portion of the LLM prompt using the filtered and synthesized information.
|
||||
- **Clarity:** Clearly delineate this retrieved context from the user's query or other parts of the prompt.
|
||||
- **Attribution (Optional but Recommended):** If possible, briefly note the source of the information (e.g., "From Decision D-42:", "According to System Pattern SP-5:").
|
||||
- **Brevity:** Strive for relevance and conciseness. Avoid including large, unprocessed chunks of data unless absolutely necessary and directly requested.
|
||||
general_principles:
|
||||
- "Prefer targeted retrieval over broad context dumps."
|
||||
- "Iterate if initial retrieval is insufficient: try different keywords or tools."
|
||||
- "Balance context richness with prompt token limits."
|
||||
|
||||
proactive_knowledge_graph_linking:
|
||||
description: |
|
||||
Guidance for the AI to proactively identify and suggest the creation of links between ConPort items,
|
||||
enriching the project's knowledge graph based on conversational context.
|
||||
trigger: "During ongoing conversation, when the AI observes potential relationships (e.g., causal, implementational, clarifying) between two or more discussed ConPort items or concepts that are likely represented as ConPort items."
|
||||
goal: "To actively build and maintain a rich, interconnected knowledge graph within ConPort by capturing relationships that might otherwise be missed."
|
||||
steps:
|
||||
- step: 1
|
||||
action: "Monitor Conversational Context"
|
||||
details: "Continuously analyze the user's statements and the flow of discussion for mentions of ConPort items (explicitly by ID, or implicitly by well-known names/summaries) and the relationships being described or implied between them."
|
||||
- step: 2
|
||||
action: "Identify Potential Links"
|
||||
details: |
|
||||
Look for patterns such as:
|
||||
- User states "Decision X led to us doing Y (which is Progress item P-3)."
|
||||
- User discusses how System Pattern SP-2 helps address a concern noted in Decision D-5.
|
||||
- User outlines a task (Progress P-10) that implements a specific feature detailed in a `custom_data` spec (CD-Spec-FeatureX).
|
||||
- step: 3
|
||||
action: "Formulate and Propose Link Suggestion"
|
||||
details: |
|
||||
If a potential link is identified:
|
||||
- Clearly state the items involved (e.g., "Decision D-5", "System Pattern SP-2").
|
||||
- Describe the perceived relationship (e.g., "It seems SP-2 addresses a concern in D-5.").
|
||||
- Propose creating a link using `ask_followup_question`.
|
||||
- Example Question: "I noticed we're discussing Decision D-5 and System Pattern SP-2. It sounds like SP-2 might 'address_concern_in' D-5. Would you like me to create this link in ConPort? You can also suggest a different relationship type."
|
||||
- Suggested Answers:
|
||||
- "Yes, link them with 'addresses_concern_in'."
|
||||
- "Yes, but use relationship type: [user types here]."
|
||||
- "No, don't link them now."
|
||||
- Offer common relationship types as examples if needed: 'implements', 'clarifies', 'related_to', 'depends_on', 'blocks', 'resolves', 'derived_from'.
|
||||
- step: 4
|
||||
action: "Gather Details and Execute Linking"
|
||||
details: |
|
||||
If the user confirms:
|
||||
- Ensure you have the correct source item type, source item ID, target item type, target item ID, and the agreed-upon relationship type.
|
||||
- Ask for an optional brief description for the link if the relationship isn't obvious.
|
||||
- Invoke the `link_conport_items` tool.
|
||||
- step: 5
|
||||
action: "Confirm Outcome"
|
||||
details: "Inform the user of the success or failure of the `link_conport_items` tool call."
|
||||
general_principles:
|
||||
- "Be helpful, not intrusive. If the user declines a suggestion, accept and move on."
|
||||
- "Prioritize clear, strong relationships over tenuous ones."
|
||||
- "This strategy complements the general `proactive_logging_cue` by providing specific guidance for link creation."
|
||||
35
.roomodes
Normal file
35
.roomodes
Normal file
@@ -0,0 +1,35 @@
|
||||
customModes:
|
||||
- slug: project-research
|
||||
name: 🔍 Project Research
|
||||
roleDefinition: |
|
||||
You are a detailed-oriented research assistant specializing in examining and understanding codebases. Your primary responsibility is to analyze the file structure, content, and dependencies of a given project to provide comprehensive context relevant to specific user queries.
|
||||
whenToUse: |
|
||||
Use this mode when you need to thoroughly investigate and understand a codebase structure, analyze project architecture, or gather comprehensive context about existing implementations. Ideal for onboarding to new projects, understanding complex codebases, or researching how specific features are implemented across the project.
|
||||
description: Investigate and analyze codebase structure
|
||||
groups:
|
||||
- read
|
||||
source: project
|
||||
customInstructions: |
|
||||
Your role is to deeply investigate and summarize the structure and implementation details of the project codebase. To achieve this effectively, you must:
|
||||
|
||||
1. Start by carefully examining the file structure of the entire project, with a particular emphasis on files located within the "docs" folder. These files typically contain crucial context, architectural explanations, and usage guidelines.
|
||||
|
||||
2. When given a specific query, systematically identify and gather all relevant context from:
|
||||
- Documentation files in the "docs" folder that provide background information, specifications, or architectural insights.
|
||||
- Relevant type definitions and interfaces, explicitly citing their exact location (file path and line number) within the source code.
|
||||
- Implementations directly related to the query, clearly noting their file locations and providing concise yet comprehensive summaries of how they function.
|
||||
- Important dependencies, libraries, or modules involved in the implementation, including their usage context and significance to the query.
|
||||
|
||||
3. Deliver a structured, detailed report that clearly outlines:
|
||||
- An overview of relevant documentation insights.
|
||||
- Specific type definitions and their exact locations.
|
||||
- Relevant implementations, including file paths, functions or methods involved, and a brief explanation of their roles.
|
||||
- Critical dependencies and their roles in relation to the query.
|
||||
|
||||
4. Always cite precise file paths, function names, and line numbers to enhance clarity and ease of navigation.
|
||||
|
||||
5. Organize your findings in logical sections, making it straightforward for the user to understand the project's structure and implementation status relevant to their request.
|
||||
|
||||
6. Ensure your response directly addresses the user's query and helps them fully grasp the relevant aspects of the project's current state.
|
||||
|
||||
These specific instructions supersede any conflicting general instructions you might otherwise follow. Your detailed report should enable effective decision-making and next steps within the overall workflow.
|
||||
@@ -1,247 +0,0 @@
|
||||
<#
|
||||
.Synopsis
|
||||
Activate a Python virtual environment for the current PowerShell session.
|
||||
|
||||
.Description
|
||||
Pushes the python executable for a virtual environment to the front of the
|
||||
$Env:PATH environment variable and sets the prompt to signify that you are
|
||||
in a Python virtual environment. Makes use of the command line switches as
|
||||
well as the `pyvenv.cfg` file values present in the virtual environment.
|
||||
|
||||
.Parameter VenvDir
|
||||
Path to the directory that contains the virtual environment to activate. The
|
||||
default value for this is the parent of the directory that the Activate.ps1
|
||||
script is located within.
|
||||
|
||||
.Parameter Prompt
|
||||
The prompt prefix to display when this virtual environment is activated. By
|
||||
default, this prompt is the name of the virtual environment folder (VenvDir)
|
||||
surrounded by parentheses and followed by a single space (ie. '(.venv) ').
|
||||
|
||||
.Example
|
||||
Activate.ps1
|
||||
Activates the Python virtual environment that contains the Activate.ps1 script.
|
||||
|
||||
.Example
|
||||
Activate.ps1 -Verbose
|
||||
Activates the Python virtual environment that contains the Activate.ps1 script,
|
||||
and shows extra information about the activation as it executes.
|
||||
|
||||
.Example
|
||||
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
|
||||
Activates the Python virtual environment located in the specified location.
|
||||
|
||||
.Example
|
||||
Activate.ps1 -Prompt "MyPython"
|
||||
Activates the Python virtual environment that contains the Activate.ps1 script,
|
||||
and prefixes the current prompt with the specified string (surrounded in
|
||||
parentheses) while the virtual environment is active.
|
||||
|
||||
.Notes
|
||||
On Windows, it may be required to enable this Activate.ps1 script by setting the
|
||||
execution policy for the user. You can do this by issuing the following PowerShell
|
||||
command:
|
||||
|
||||
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
|
||||
|
||||
For more information on Execution Policies:
|
||||
https://go.microsoft.com/fwlink/?LinkID=135170
|
||||
|
||||
#>
|
||||
Param(
|
||||
[Parameter(Mandatory = $false)]
|
||||
[String]
|
||||
$VenvDir,
|
||||
[Parameter(Mandatory = $false)]
|
||||
[String]
|
||||
$Prompt
|
||||
)
|
||||
|
||||
<# Function declarations --------------------------------------------------- #>
|
||||
|
||||
<#
|
||||
.Synopsis
|
||||
Remove all shell session elements added by the Activate script, including the
|
||||
addition of the virtual environment's Python executable from the beginning of
|
||||
the PATH variable.
|
||||
|
||||
.Parameter NonDestructive
|
||||
If present, do not remove this function from the global namespace for the
|
||||
session.
|
||||
|
||||
#>
|
||||
function global:deactivate ([switch]$NonDestructive) {
|
||||
# Revert to original values
|
||||
|
||||
# The prior prompt:
|
||||
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
|
||||
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
|
||||
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
|
||||
}
|
||||
|
||||
# The prior PYTHONHOME:
|
||||
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
|
||||
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
|
||||
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
|
||||
}
|
||||
|
||||
# The prior PATH:
|
||||
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
|
||||
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
|
||||
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
|
||||
}
|
||||
|
||||
# Just remove the VIRTUAL_ENV altogether:
|
||||
if (Test-Path -Path Env:VIRTUAL_ENV) {
|
||||
Remove-Item -Path env:VIRTUAL_ENV
|
||||
}
|
||||
|
||||
# Just remove VIRTUAL_ENV_PROMPT altogether.
|
||||
if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
|
||||
Remove-Item -Path env:VIRTUAL_ENV_PROMPT
|
||||
}
|
||||
|
||||
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
|
||||
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
|
||||
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
|
||||
}
|
||||
|
||||
# Leave deactivate function in the global namespace if requested:
|
||||
if (-not $NonDestructive) {
|
||||
Remove-Item -Path function:deactivate
|
||||
}
|
||||
}
|
||||
|
||||
<#
|
||||
.Description
|
||||
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
|
||||
given folder, and returns them in a map.
|
||||
|
||||
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
|
||||
two strings separated by `=` (with any amount of whitespace surrounding the =)
|
||||
then it is considered a `key = value` line. The left hand string is the key,
|
||||
the right hand is the value.
|
||||
|
||||
If the value starts with a `'` or a `"` then the first and last character is
|
||||
stripped from the value before being captured.
|
||||
|
||||
.Parameter ConfigDir
|
||||
Path to the directory that contains the `pyvenv.cfg` file.
|
||||
#>
|
||||
function Get-PyVenvConfig(
|
||||
[String]
|
||||
$ConfigDir
|
||||
) {
|
||||
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
|
||||
|
||||
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
|
||||
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
|
||||
|
||||
# An empty map will be returned if no config file is found.
|
||||
$pyvenvConfig = @{ }
|
||||
|
||||
if ($pyvenvConfigPath) {
|
||||
|
||||
Write-Verbose "File exists, parse `key = value` lines"
|
||||
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
|
||||
|
||||
$pyvenvConfigContent | ForEach-Object {
|
||||
$keyval = $PSItem -split "\s*=\s*", 2
|
||||
if ($keyval[0] -and $keyval[1]) {
|
||||
$val = $keyval[1]
|
||||
|
||||
# Remove extraneous quotations around a string value.
|
||||
if ("'""".Contains($val.Substring(0, 1))) {
|
||||
$val = $val.Substring(1, $val.Length - 2)
|
||||
}
|
||||
|
||||
$pyvenvConfig[$keyval[0]] = $val
|
||||
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
|
||||
}
|
||||
}
|
||||
}
|
||||
return $pyvenvConfig
|
||||
}
|
||||
|
||||
|
||||
<# Begin Activate script --------------------------------------------------- #>
|
||||
|
||||
# Determine the containing directory of this script
|
||||
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
|
||||
$VenvExecDir = Get-Item -Path $VenvExecPath
|
||||
|
||||
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
|
||||
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
|
||||
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
|
||||
|
||||
# Set values required in priority: CmdLine, ConfigFile, Default
|
||||
# First, get the location of the virtual environment, it might not be
|
||||
# VenvExecDir if specified on the command line.
|
||||
if ($VenvDir) {
|
||||
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
|
||||
}
|
||||
else {
|
||||
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
|
||||
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
|
||||
Write-Verbose "VenvDir=$VenvDir"
|
||||
}
|
||||
|
||||
# Next, read the `pyvenv.cfg` file to determine any required value such
|
||||
# as `prompt`.
|
||||
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
|
||||
|
||||
# Next, set the prompt from the command line, or the config file, or
|
||||
# just use the name of the virtual environment folder.
|
||||
if ($Prompt) {
|
||||
Write-Verbose "Prompt specified as argument, using '$Prompt'"
|
||||
}
|
||||
else {
|
||||
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
|
||||
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
|
||||
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
|
||||
$Prompt = $pyvenvCfg['prompt'];
|
||||
}
|
||||
else {
|
||||
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
|
||||
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
|
||||
$Prompt = Split-Path -Path $venvDir -Leaf
|
||||
}
|
||||
}
|
||||
|
||||
Write-Verbose "Prompt = '$Prompt'"
|
||||
Write-Verbose "VenvDir='$VenvDir'"
|
||||
|
||||
# Deactivate any currently active virtual environment, but leave the
|
||||
# deactivate function in place.
|
||||
deactivate -nondestructive
|
||||
|
||||
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
|
||||
# that there is an activated venv.
|
||||
$env:VIRTUAL_ENV = $VenvDir
|
||||
|
||||
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
|
||||
|
||||
Write-Verbose "Setting prompt to '$Prompt'"
|
||||
|
||||
# Set the prompt to include the env name
|
||||
# Make sure _OLD_VIRTUAL_PROMPT is global
|
||||
function global:_OLD_VIRTUAL_PROMPT { "" }
|
||||
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
|
||||
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
|
||||
|
||||
function global:prompt {
|
||||
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
|
||||
_OLD_VIRTUAL_PROMPT
|
||||
}
|
||||
$env:VIRTUAL_ENV_PROMPT = $Prompt
|
||||
}
|
||||
|
||||
# Clear PYTHONHOME
|
||||
if (Test-Path -Path Env:PYTHONHOME) {
|
||||
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
|
||||
Remove-Item -Path Env:PYTHONHOME
|
||||
}
|
||||
|
||||
# Add the venv to the PATH
|
||||
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
|
||||
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
|
||||
@@ -1,70 +0,0 @@
|
||||
# This file must be used with "source bin/activate" *from bash*
|
||||
# You cannot run it directly
|
||||
|
||||
deactivate () {
|
||||
# reset old environment variables
|
||||
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
|
||||
PATH="${_OLD_VIRTUAL_PATH:-}"
|
||||
export PATH
|
||||
unset _OLD_VIRTUAL_PATH
|
||||
fi
|
||||
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
|
||||
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
|
||||
export PYTHONHOME
|
||||
unset _OLD_VIRTUAL_PYTHONHOME
|
||||
fi
|
||||
|
||||
# Call hash to forget past commands. Without forgetting
|
||||
# past commands the $PATH changes we made may not be respected
|
||||
hash -r 2> /dev/null
|
||||
|
||||
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
|
||||
PS1="${_OLD_VIRTUAL_PS1:-}"
|
||||
export PS1
|
||||
unset _OLD_VIRTUAL_PS1
|
||||
fi
|
||||
|
||||
unset VIRTUAL_ENV
|
||||
unset VIRTUAL_ENV_PROMPT
|
||||
if [ ! "${1:-}" = "nondestructive" ] ; then
|
||||
# Self destruct!
|
||||
unset -f deactivate
|
||||
fi
|
||||
}
|
||||
|
||||
# unset irrelevant variables
|
||||
deactivate nondestructive
|
||||
|
||||
# on Windows, a path can contain colons and backslashes and has to be converted:
|
||||
if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then
|
||||
# transform D:\path\to\venv to /d/path/to/venv on MSYS
|
||||
# and to /cygdrive/d/path/to/venv on Cygwin
|
||||
export VIRTUAL_ENV=$(cygpath "/home/pac/thrillwiki/thrillwiki_django_no_react/.venv")
|
||||
else
|
||||
# use the path as-is
|
||||
export VIRTUAL_ENV="/home/pac/thrillwiki/thrillwiki_django_no_react/.venv"
|
||||
fi
|
||||
|
||||
_OLD_VIRTUAL_PATH="$PATH"
|
||||
PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||
export PATH
|
||||
|
||||
# unset PYTHONHOME if set
|
||||
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
|
||||
# could use `if (set -u; : $PYTHONHOME) ;` in bash
|
||||
if [ -n "${PYTHONHOME:-}" ] ; then
|
||||
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
|
||||
unset PYTHONHOME
|
||||
fi
|
||||
|
||||
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
|
||||
_OLD_VIRTUAL_PS1="${PS1:-}"
|
||||
PS1="(.venv) ${PS1:-}"
|
||||
export PS1
|
||||
VIRTUAL_ENV_PROMPT="(.venv) "
|
||||
export VIRTUAL_ENV_PROMPT
|
||||
fi
|
||||
|
||||
# Call hash to forget past commands. Without forgetting
|
||||
# past commands the $PATH changes we made may not be respected
|
||||
hash -r 2> /dev/null
|
||||
@@ -1,27 +0,0 @@
|
||||
# This file must be used with "source bin/activate.csh" *from csh*.
|
||||
# You cannot run it directly.
|
||||
|
||||
# Created by Davide Di Blasi <davidedb@gmail.com>.
|
||||
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
|
||||
|
||||
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
|
||||
|
||||
# Unset irrelevant variables.
|
||||
deactivate nondestructive
|
||||
|
||||
setenv VIRTUAL_ENV "/home/pac/thrillwiki/thrillwiki_django_no_react/.venv"
|
||||
|
||||
set _OLD_VIRTUAL_PATH="$PATH"
|
||||
setenv PATH "$VIRTUAL_ENV/bin:$PATH"
|
||||
|
||||
|
||||
set _OLD_VIRTUAL_PROMPT="$prompt"
|
||||
|
||||
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
|
||||
set prompt = "(.venv) $prompt"
|
||||
setenv VIRTUAL_ENV_PROMPT "(.venv) "
|
||||
endif
|
||||
|
||||
alias pydoc python -m pydoc
|
||||
|
||||
rehash
|
||||
@@ -1,69 +0,0 @@
|
||||
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
|
||||
# (https://fishshell.com/). You cannot run it directly.
|
||||
|
||||
function deactivate -d "Exit virtual environment and return to normal shell environment"
|
||||
# reset old environment variables
|
||||
if test -n "$_OLD_VIRTUAL_PATH"
|
||||
set -gx PATH $_OLD_VIRTUAL_PATH
|
||||
set -e _OLD_VIRTUAL_PATH
|
||||
end
|
||||
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
|
||||
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
|
||||
set -e _OLD_VIRTUAL_PYTHONHOME
|
||||
end
|
||||
|
||||
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
|
||||
set -e _OLD_FISH_PROMPT_OVERRIDE
|
||||
# prevents error when using nested fish instances (Issue #93858)
|
||||
if functions -q _old_fish_prompt
|
||||
functions -e fish_prompt
|
||||
functions -c _old_fish_prompt fish_prompt
|
||||
functions -e _old_fish_prompt
|
||||
end
|
||||
end
|
||||
|
||||
set -e VIRTUAL_ENV
|
||||
set -e VIRTUAL_ENV_PROMPT
|
||||
if test "$argv[1]" != "nondestructive"
|
||||
# Self-destruct!
|
||||
functions -e deactivate
|
||||
end
|
||||
end
|
||||
|
||||
# Unset irrelevant variables.
|
||||
deactivate nondestructive
|
||||
|
||||
set -gx VIRTUAL_ENV "/home/pac/thrillwiki/thrillwiki_django_no_react/.venv"
|
||||
|
||||
set -gx _OLD_VIRTUAL_PATH $PATH
|
||||
set -gx PATH "$VIRTUAL_ENV/bin" $PATH
|
||||
|
||||
# Unset PYTHONHOME if set.
|
||||
if set -q PYTHONHOME
|
||||
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
|
||||
set -e PYTHONHOME
|
||||
end
|
||||
|
||||
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
|
||||
# fish uses a function instead of an env var to generate the prompt.
|
||||
|
||||
# Save the current fish_prompt function as the function _old_fish_prompt.
|
||||
functions -c fish_prompt _old_fish_prompt
|
||||
|
||||
# With the original prompt function renamed, we can override with our own.
|
||||
function fish_prompt
|
||||
# Save the return status of the last command.
|
||||
set -l old_status $status
|
||||
|
||||
# Output the venv prompt; color taken from the blue of the Python logo.
|
||||
printf "%s%s%s" (set_color 4B8BBE) "(.venv) " (set_color normal)
|
||||
|
||||
# Restore the return status of the previous command.
|
||||
echo "exit $old_status" | .
|
||||
# Output the original/"old" prompt.
|
||||
_old_fish_prompt
|
||||
end
|
||||
|
||||
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
|
||||
set -gx VIRTUAL_ENV_PROMPT "(.venv) "
|
||||
end
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from automat._visualize import tool
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(tool())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from black import patched_main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(patched_main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from blackd import patched_main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(patched_main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from twisted.conch.scripts.cftp import run
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(run())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from twisted.conch.scripts.ckeygen import run
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(run())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from twisted.conch.scripts.conch import run
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(run())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from coverage.cmdline import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from coverage.cmdline import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from coverage.cmdline import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from daphne.cli import CommandLineInterface
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(CommandLineInterface.entrypoint())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from django.core.management import execute_from_command_line
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(execute_from_command_line())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from dotenv.__main__ import cli
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(cli())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from flake8.main.cli import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from twisted.mail.scripts.mailmail import run
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(run())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from charset_normalizer.cli import cli_detect
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(cli_detect())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from pip._internal.cli.main import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from pip._internal.cli.main import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from pip._internal.cli.main import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from pytest import console_main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(console_main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from pycodestyle import _main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(_main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from pyflakes.api import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from twisted.scripts.htmlizer import run
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(run())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from pytest import console_main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(console_main())
|
||||
@@ -1 +0,0 @@
|
||||
python3
|
||||
@@ -1 +0,0 @@
|
||||
/usr/local/bin/python3
|
||||
@@ -1 +0,0 @@
|
||||
python3
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from sqlparse.__main__ import main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from twisted.conch.scripts.tkconch import run
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(run())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from twisted.scripts.trial import run
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(run())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from twisted.application.twist._twist import Twist
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(Twist.main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from twisted.scripts.twistd import run
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(run())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from autobahn.__main__ import _main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(_main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from autobahn.xbr._cli import _main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(_main())
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/home/pac/thrillwiki/thrillwiki_django_no_react/.venv/bin/python3
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
from autobahn.xbr._gui import _main
|
||||
if __name__ == '__main__':
|
||||
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
|
||||
sys.exit(_main())
|
||||
Binary file not shown.
@@ -1 +0,0 @@
|
||||
pip
|
||||
@@ -1,21 +0,0 @@
|
||||
Copyright (c) 2014
|
||||
Rackspace
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
@@ -1,199 +0,0 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: Automat
|
||||
Version: 24.8.1
|
||||
Summary: Self-service finite-state machines for the programmer on the go.
|
||||
Author-email: Glyph <code@glyph.im>
|
||||
License: Copyright (c) 2014
|
||||
Rackspace
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
Project-URL: Documentation, https://automat.readthedocs.io/
|
||||
Project-URL: Source, https://github.com/glyph/automat/
|
||||
Keywords: fsm,state machine,automata
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: MIT License
|
||||
Classifier: Operating System :: OS Independent
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.8
|
||||
Classifier: Programming Language :: Python :: 3.9
|
||||
Classifier: Programming Language :: Python :: 3.10
|
||||
Classifier: Programming Language :: Python :: 3.11
|
||||
Classifier: Programming Language :: Python :: 3.12
|
||||
Requires-Python: >=3.8
|
||||
Description-Content-Type: text/markdown
|
||||
License-File: LICENSE
|
||||
Requires-Dist: typing-extensions; python_version < "3.10"
|
||||
Provides-Extra: visualize
|
||||
Requires-Dist: graphviz>0.5.1; extra == "visualize"
|
||||
Requires-Dist: Twisted>=16.1.1; extra == "visualize"
|
||||
|
||||
# Automat #
|
||||
|
||||
[](http://automat.readthedocs.io/en/latest/)
|
||||
[](https://github.com/glyph/automat/actions/workflows/ci.yml?query=branch%3Atrunk)
|
||||
[](http://codecov.io/github/glyph/automat?branch=trunk)
|
||||
|
||||
## Self-service finite-state machines for the programmer on the go. ##
|
||||
|
||||
Automat is a library for concise, idiomatic Python expression of finite-state
|
||||
automata (particularly deterministic finite-state transducers).
|
||||
|
||||
Read more here, or on [Read the Docs](https://automat.readthedocs.io/), or watch the following videos for an overview and presentation
|
||||
|
||||
### Why use state machines? ###
|
||||
|
||||
Sometimes you have to create an object whose behavior varies with its state,
|
||||
but still wishes to present a consistent interface to its callers.
|
||||
|
||||
For example, let's say you're writing the software for a coffee machine. It
|
||||
has a lid that can be opened or closed, a chamber for water, a chamber for
|
||||
coffee beans, and a button for "brew".
|
||||
|
||||
There are a number of possible states for the coffee machine. It might or
|
||||
might not have water. It might or might not have beans. The lid might be open
|
||||
or closed. The "brew" button should only actually attempt to brew coffee in
|
||||
one of these configurations, and the "open lid" button should only work if the
|
||||
coffee is not, in fact, brewing.
|
||||
|
||||
With diligence and attention to detail, you can implement this correctly using
|
||||
a collection of attributes on an object; `hasWater`, `hasBeans`, `isLidOpen`
|
||||
and so on. However, you have to keep all these attributes consistent. As the
|
||||
coffee maker becomes more complex - perhaps you add an additional chamber for
|
||||
flavorings so you can make hazelnut coffee, for example - you have to keep
|
||||
adding more and more checks and more and more reasoning about which
|
||||
combinations of states are allowed.
|
||||
|
||||
Rather than adding tedious `if` checks to every single method to make sure that
|
||||
each of these flags are exactly what you expect, you can use a state machine to
|
||||
ensure that if your code runs at all, it will be run with all the required
|
||||
values initialized, because they have to be called in the order you declare
|
||||
them.
|
||||
|
||||
You can read about state machines and their advantages for Python programmers
|
||||
in more detail [in this excellent article by Jean-Paul
|
||||
Calderone](https://web.archive.org/web/20160507053658/https://clusterhq.com/2013/12/05/what-is-a-state-machine/).
|
||||
|
||||
### What makes Automat different? ###
|
||||
|
||||
There are
|
||||
[dozens of libraries on PyPI implementing state machines](https://pypi.org/search/?q=finite+state+machine).
|
||||
So it behooves me to say why yet another one would be a good idea.
|
||||
|
||||
Automat is designed around this principle: while organizing your code around
|
||||
state machines is a good idea, your callers don't, and shouldn't have to, care
|
||||
that you've done so. In Python, the "input" to a stateful system is a method
|
||||
call; the "output" may be a method call, if you need to invoke a side effect,
|
||||
or a return value, if you are just performing a computation in memory. Most
|
||||
other state-machine libraries require you to explicitly create an input object,
|
||||
provide that object to a generic "input" method, and then receive results,
|
||||
sometimes in terms of that library's interfaces and sometimes in terms of
|
||||
classes you define yourself.
|
||||
|
||||
For example, a snippet of the coffee-machine example above might be implemented
|
||||
as follows in naive Python:
|
||||
|
||||
```python
|
||||
class CoffeeMachine(object):
|
||||
def brewButton(self) -> None:
|
||||
if self.hasWater and self.hasBeans and not self.isLidOpen:
|
||||
self.heatTheHeatingElement()
|
||||
# ...
|
||||
```
|
||||
|
||||
With Automat, you'd begin with a `typing.Protocol` that describes all of your
|
||||
inputs:
|
||||
|
||||
```python
|
||||
from typing import Protocol
|
||||
|
||||
class CoffeeBrewer(Protocol):
|
||||
def brewButton(self) -> None:
|
||||
"The user pressed the 'brew' button."
|
||||
def putInBeans(self) -> None:
|
||||
"The user put in some beans."
|
||||
```
|
||||
|
||||
We'll then need a concrete class to contain the shared core of state shared
|
||||
among the different states:
|
||||
|
||||
```python
|
||||
from dataclasses import dataclass
|
||||
|
||||
@dataclass
|
||||
class BrewerCore:
|
||||
heatingElement: HeatingElement
|
||||
```
|
||||
|
||||
Next, we need to describe our state machine, including all of our states. For
|
||||
simplicity's sake let's say that the only two states are `noBeans` and
|
||||
`haveBeans`:
|
||||
|
||||
```python
|
||||
from automat import TypeMachineBuilder
|
||||
|
||||
builder = TypeMachineBuilder(CoffeeBrewer, BrewerCore)
|
||||
noBeans = builder.state("noBeans")
|
||||
haveBeans = builder.state("haveBeans")
|
||||
```
|
||||
|
||||
Next we can describe a simple transition; when we put in beans, we move to the
|
||||
`haveBeans` state, with no other behavior.
|
||||
|
||||
```python
|
||||
# When we don't have beans, upon putting in beans, we will then have beans
|
||||
noBeans.upon(CoffeeBrewer.putInBeans).to(haveBeans).returns(None)
|
||||
```
|
||||
|
||||
And then another transition that we describe with a decorator, one that *does*
|
||||
have some behavior, that needs to heat up the heating element to brew the
|
||||
coffee:
|
||||
|
||||
```python
|
||||
@haveBeans.upon(CoffeeBrewer.brewButton).to(noBeans)
|
||||
def heatUp(inputs: CoffeeBrewer, core: BrewerCore) -> None:
|
||||
"""
|
||||
When we have beans, upon pressing the brew button, we will then not have
|
||||
beans any more (as they have been entered into the brewing chamber) and
|
||||
our output will be heating the heating element.
|
||||
"""
|
||||
print("Brewing the coffee...")
|
||||
core.heatingElement.turnOn()
|
||||
```
|
||||
|
||||
Then we finalize the state machine by building it, which gives us a callable
|
||||
that takes a `BrewerCore` and returns a synthetic `CoffeeBrewer`
|
||||
|
||||
```python
|
||||
newCoffeeMachine = builder.build()
|
||||
```
|
||||
|
||||
```python
|
||||
>>> coffee = newCoffeeMachine(BrewerCore(HeatingElement()))
|
||||
>>> machine.putInBeans()
|
||||
>>> machine.brewButton()
|
||||
Brewing the coffee...
|
||||
```
|
||||
|
||||
All of the *inputs* are provided by calling them like methods, all of the
|
||||
*output behaviors* are automatically invoked when they are produced according
|
||||
to the outputs specified to `upon` and all of the states are simply opaque
|
||||
tokens.
|
||||
@@ -1,39 +0,0 @@
../../../bin/automat-visualize,sha256=IsKluasmdX-znO8xCKiVIZ1zYKnBxs3ykFbJ3SxJ4vs,266
Automat-24.8.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
Automat-24.8.1.dist-info/LICENSE,[AWS-SECRET-REMOVED]zv_-9TA43Y,1053
Automat-24.8.1.dist-info/METADATA,sha256=XJIxL4Olb15WCoAdVEcORum39__wiCXQR6LNubERZ6M,8396
Automat-24.8.1.dist-info/RECORD,,
Automat-24.8.1.dist-info/WHEEL,sha256=HiCZjzuy6Dw0hdX5R3LCFPDmFS4BWl8H-8W39XfmgX4,91
Automat-24.8.1.dist-info/entry_points.txt,[AWS-SECRET-REMOVED]rPNlGi7NbQ,62
Automat-24.8.1.dist-info/top_level.txt,sha256=vg4zAOyhP_3YCmpKZLNgFw1uMF3lC_b6TKsdz7jBSpI,8
automat/__init__.py,[AWS-SECRET-REMOVED]WD_RltPB-U,356
automat/__pycache__/__init__.cpython-312.pyc,,
automat/__pycache__/_core.cpython-312.pyc,,
automat/__pycache__/_discover.cpython-312.pyc,,
automat/__pycache__/_introspection.cpython-312.pyc,,
automat/__pycache__/_methodical.cpython-312.pyc,,
automat/__pycache__/_runtimeproto.cpython-312.pyc,,
automat/__pycache__/_typed.cpython-312.pyc,,
automat/__pycache__/_visualize.cpython-312.pyc,,
automat/_core.py,sha256=oe4QNlfvmgsnKe_8fyNiOsHsfz5xPArGuXWle9zePp8,6663
automat/_discover.py,sha256=KRbmm7kxpd-WReDQU4qe6hVKGUKmGBHUjYIkRneO4mc,5197
automat/_introspection.py,sha256=uF5ymY-GZckyRxvRs7UToPBV_oVV6xHmvlBVey9nv80,1441
automat/_methodical.py,sha256=YgZXraDe6dvK6w_Y9xfPa0kXCka4ZeGLfcb4IfK4bnI,17243
automat/_runtimeproto.py,sha256=mJ_[AWS-SECRET-REMOVED],1654
automat/_test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
automat/_test/__pycache__/__init__.cpython-312.pyc,,
automat/_test/__pycache__/test_core.cpython-312.pyc,,
automat/_test/__pycache__/test_discover.cpython-312.pyc,,
automat/_test/__pycache__/test_methodical.cpython-312.pyc,,
automat/_test/__pycache__/test_trace.cpython-312.pyc,,
automat/_test/__pycache__/test_type_based.cpython-312.pyc,,
automat/_test/__pycache__/test_visualize.cpython-312.pyc,,
automat/_test/test_core.py,sha256=PJHNvQ85i8vjH-oF6nPNKB84_noTyl2dQSv_iRl70J8,3481
automat/_test/test_discover.py,[AWS-SECRET-REMOVED]Gg7kQnbvtw,22067
automat/_test/test_methodical.py,sha256=SKMMbl-6bjH-QZ1hDFRItCOyKF4SstuA4QvExnVhn7w,20267
automat/_test/test_trace.py,sha256=tty7P_ctJtk38ZXnpmEy-J9Rn-Hh2AKb_ia6EmtXSQI,3299
automat/_test/test_type_based.py,[AWS-SECRET-REMOVED]AegU_OUvko,17872
automat/_test/test_visualize.py,sha256=HXBPgMAD0OTz_l1Yq0lI3d1vJ_l3sHTH67Jauaf-2sk,14631
automat/_typed.py,sha256=lMzMgUfX713Xw_W4pr8iyPqcdpRSbu4rEpRlrXAOW2k,24204
automat/_visualize.py,sha256=DQYig2mBKX-LquPEEy89Y_qyj21tSutAUaFsF1F64ws,6512
automat/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: setuptools (72.2.0)
Root-Is-Purelib: true
Tag: py3-none-any

@@ -1,2 +0,0 @@
[console_scripts]
automat-visualize = automat._visualize:tool
@@ -1 +0,0 @@
automat
File diff suppressed because it is too large
@@ -1 +0,0 @@
pip
@@ -1,27 +0,0 @@
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

    1. Redistributions of source code must retain the above copyright notice,
       this list of conditions and the following disclaimer.

    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.

    3. Neither the name of Django nor the names of its contributors may be used
       to endorse or promote products derived from this software without
       specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,288 +0,0 @@
Django is licensed under the three-clause BSD license; see the file
LICENSE for details.

Django includes code from the Python standard library, which is licensed under
the Python license, a permissive open source license. The copyright and license
is included below for compliance with Python's terms.

----------------------------------------------------------------------

Copyright (c) 2001-present Python Software Foundation; All Rights Reserved

A. HISTORY OF THE SOFTWARE
==========================

Python was created in the early 1990s by Guido van Rossum at Stichting
Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands
as a successor of a language called ABC. Guido remains Python's
principal author, although it includes many contributions from others.

In 1995, Guido continued his work on Python at the Corporation for
National Research Initiatives (CNRI, see https://www.cnri.reston.va.us)
in Reston, Virginia where he released several versions of the
software.

In May 2000, Guido and the Python core development team moved to
BeOpen.com to form the BeOpen PythonLabs team. In October of the same
year, the PythonLabs team moved to Digital Creations, which became
Zope Corporation. In 2001, the Python Software Foundation (PSF, see
https://www.python.org/psf/) was formed, a non-profit organization
created specifically to own Python-related Intellectual Property.
Zope Corporation was a sponsoring member of the PSF.

All Python releases are Open Source (see https://opensource.org for
the Open Source Definition). Historically, most, but not all, Python
releases have also been GPL-compatible; the table below summarizes
the various releases.

    Release         Derived     Year        Owner       GPL-
                    from                                compatible? (1)

    0.9.0 thru 1.2              1991-1995   CWI         yes
    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
    1.6             1.5.2       2000        CNRI        no
    2.0             1.6         2000        BeOpen.com  no
    1.6.1           1.6         2001        CNRI        yes (2)
    2.1             2.0+1.6.1   2001        PSF         no
    2.0.1           2.0+1.6.1   2001        PSF         yes
    2.1.1           2.1+2.0.1   2001        PSF         yes
    2.1.2           2.1.1       2002        PSF         yes
    2.1.3           2.1.2       2002        PSF         yes
    2.2 and above   2.1.1       2001-now    PSF         yes

Footnotes:

(1) GPL-compatible doesn't mean that we're distributing Python under
    the GPL. All Python licenses, unlike the GPL, let you distribute
    a modified version without making your changes open source. The
    GPL-compatible licenses make it possible to combine Python with
    other software that is released under the GPL; the others don't.

(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
    because its license has a choice of law clause. According to
    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
    is "not incompatible" with the GPL.

Thanks to the many outside volunteers who have worked under Guido's
direction to make these releases possible.


B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
===============================================================

Python software and documentation are licensed under the
Python Software Foundation License Version 2.

Starting with Python 3.8.6, examples, recipes, and other code in
the documentation are dual licensed under the PSF License Version 2
and the Zero-Clause BSD license.

Some software incorporated into Python is under different licenses.
The licenses are listed with code falling under that license.


PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------

1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.

2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001-2024 Python Software Foundation; All Rights Reserved"
are retained in Python alone or in any derivative version prepared by Licensee.

3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.

4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.

8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.


BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
-------------------------------------------

BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1

1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
Individual or Organization ("Licensee") accessing and otherwise using
this software in source or binary form and its associated
documentation ("the Software").

2. Subject to the terms and conditions of this BeOpen Python License
Agreement, BeOpen hereby grants Licensee a non-exclusive,
royalty-free, world-wide license to reproduce, analyze, test, perform
and/or display publicly, prepare derivative works, distribute, and
otherwise use the Software alone or in any derivative version,
provided, however, that the BeOpen Python License is retained in the
Software, alone or in any derivative version prepared by Licensee.

3. BeOpen is making the Software available to Licensee on an "AS IS"
basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

5. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

6. This License Agreement shall be governed by and interpreted in all
respects by the law of the State of California, excluding conflict of
law provisions. Nothing in this License Agreement shall be deemed to
create any relationship of agency, partnership, or joint venture
between BeOpen and Licensee. This License Agreement does not grant
permission to use BeOpen trademarks or trade names in a trademark
sense to endorse or promote products or services of Licensee, or any
third party. As an exception, the "BeOpen Python" logos available at
http://www.pythonlabs.com/logos.html may be used according to the
permissions granted on that web page.

7. By copying, installing or otherwise using the software, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.


CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
---------------------------------------

1. This LICENSE AGREEMENT is between the Corporation for National
Research Initiatives, having an office at 1895 Preston White Drive,
Reston, VA 20191 ("CNRI"), and the Individual or Organization
("Licensee") accessing and otherwise using Python 1.6.1 software in
source or binary form and its associated documentation.

2. Subject to the terms and conditions of this License Agreement, CNRI
hereby grants Licensee a nonexclusive, royalty-free, world-wide
license to reproduce, analyze, test, perform and/or display publicly,
prepare derivative works, distribute, and otherwise use Python 1.6.1
alone or in any derivative version, provided, however, that CNRI's
License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
1995-2001 Corporation for National Research Initiatives; All Rights
Reserved" are retained in Python 1.6.1 alone or in any derivative
version prepared by Licensee. Alternately, in lieu of CNRI's License
Agreement, Licensee may substitute the following text (omitting the
quotes): "Python 1.6.1 is made available subject to the terms and
conditions in CNRI's License Agreement. This Agreement together with
Python 1.6.1 may be located on the internet using the following
unique, persistent identifier (known as a handle): 1895.22/1013. This
Agreement may also be obtained from a proxy server on the internet
using the following URL: http://hdl.handle.net/1895.22/1013".

3. In the event Licensee prepares a derivative work that is based on
or incorporates Python 1.6.1 or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python 1.6.1.

4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

7. This License Agreement shall be governed by the federal
intellectual property law of the United States, including without
limitation the federal copyright law, and, to the extent such
U.S. federal law does not apply, by the law of the Commonwealth of
Virginia, excluding Virginia's conflict of law provisions.
Notwithstanding the foregoing, with regard to derivative works based
on Python 1.6.1 that incorporate non-separable material that was
previously distributed under the GNU General Public License (GPL), the
law of the Commonwealth of Virginia shall govern this License
Agreement only as to issues arising under or with respect to
Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
License Agreement shall be deemed to create any relationship of
agency, partnership, or joint venture between CNRI and Licensee. This
License Agreement does not grant permission to use CNRI trademarks or
trade name in a trademark sense to endorse or promote products or
services of Licensee, or any third party.

8. By clicking on the "ACCEPT" button where indicated, or by copying,
installing or otherwise using Python 1.6.1, Licensee agrees to be
bound by the terms and conditions of this License Agreement.

        ACCEPT


CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
--------------------------------------------------

Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
The Netherlands. All rights reserved.

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of Stichting Mathematisch
Centrum or CWI not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior
permission.

STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION
----------------------------------------------------------------------

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
@@ -1,100 +0,0 @@
Metadata-Version: 2.1
Name: Django
Version: 5.1.2
Summary: A high-level Python web framework that encourages rapid development and clean, pragmatic design.
Author-email: Django Software Foundation <foundation@djangoproject.com>
License: BSD-3-Clause
Project-URL: Homepage, https://www.djangoproject.com/
Project-URL: Documentation, https://docs.djangoproject.com/
Project-URL: Release notes, https://docs.djangoproject.com/en/stable/releases/
Project-URL: Funding, https://www.djangoproject.com/fundraising/
Project-URL: Source, https://github.com/django/django
Project-URL: Tracker, https://code.djangoproject.com/
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Framework :: Django
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Internet :: WWW/HTTP
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Internet :: WWW/HTTP :: WSGI
Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Python: >=3.10
Description-Content-Type: text/x-rst
License-File: LICENSE
License-File: LICENSE.python
License-File: AUTHORS
Requires-Dist: asgiref<4,>=3.8.1
Requires-Dist: sqlparse>=0.3.1
Requires-Dist: tzdata; sys_platform == "win32"
Provides-Extra: argon2
Requires-Dist: argon2-cffi>=19.1.0; extra == "argon2"
Provides-Extra: bcrypt
Requires-Dist: bcrypt; extra == "bcrypt"

======
Django
======

Django is a high-level Python web framework that encourages rapid development
and clean, pragmatic design. Thanks for checking it out.

All documentation is in the "``docs``" directory and online at
https://docs.djangoproject.com/en/stable/. If you're just getting started,
here's how we recommend you read the docs:

* First, read ``docs/intro/install.txt`` for instructions on installing Django.

* Next, work through the tutorials in order (``docs/intro/tutorial01.txt``,
  ``docs/intro/tutorial02.txt``, etc.).

* If you want to set up an actual deployment server, read
  ``docs/howto/deployment/index.txt`` for instructions.

* You'll probably want to read through the topical guides (in ``docs/topics``)
  next; from there you can jump to the HOWTOs (in ``docs/howto``) for specific
  problems, and check out the reference (``docs/ref``) for gory details.

* See ``docs/README`` for instructions on building an HTML version of the docs.

Docs are updated rigorously. If you find any problems in the docs, or think
they should be clarified in any way, please take 30 seconds to fill out a
ticket here: https://code.djangoproject.com/newticket

To get more help:

* Join the ``#django`` channel on ``irc.libera.chat``. Lots of helpful people
  hang out there. `Webchat is available <https://web.libera.chat/#django>`_.

* Join the django-users mailing list, or read the archives, at
  https://groups.google.com/group/django-users.

* Join the `Django Discord community <https://discord.gg/xcRH6mN4fa>`_.

* Join the community on the `Django Forum <https://forum.djangoproject.com/>`_.

To contribute to Django:

* Check out https://docs.djangoproject.com/en/dev/internals/contributing/ for
  information about getting involved.

To run Django's test suite:

* Follow the instructions in the "Unit tests" section of
  ``docs/internals/contributing/writing-code/unit-tests.txt``, published online at
  https://docs.djangoproject.[AWS-SECRET-REMOVED]g-code/unit-tests/#running-the-unit-tests

Supporting the Development of Django
====================================

Django's development depends on your contributions.

If you depend on Django, remember to support the Django Software Foundation: https://www.djangoproject.com/fundraising/
File diff suppressed because it is too large
@@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.44.0)
Root-Is-Purelib: true
Tag: py3-none-any

@@ -1,2 +0,0 @@
[console_scripts]
django-admin = django.core.management:execute_from_command_line
@@ -1 +0,0 @@
django
File diff suppressed because it is too large
@@ -1,31 +0,0 @@
# Copyright (C) AB Strakt
# See LICENSE for details.

"""
pyOpenSSL - A simple wrapper around the OpenSSL library
"""

from OpenSSL import SSL, crypto
from OpenSSL.version import (
    __author__,
    __copyright__,
    __email__,
    __license__,
    __summary__,
    __title__,
    __uri__,
    __version__,
)

__all__ = [
    "SSL",
    "crypto",
    "__author__",
    "__copyright__",
    "__email__",
    "__license__",
    "__summary__",
    "__title__",
    "__uri__",
    "__version__",
]
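
A brief, hedged sketch of what this ``__init__`` gives importers (nothing here
beyond the re-exports shown above; the printed version will vary by install):

```python
import OpenSSL

# SSL and crypto are imported eagerly by the package __init__,
# and the version metadata is re-exported from OpenSSL.version.
print(OpenSSL.__version__)  # e.g. "24.2.1"
print(OpenSSL.SSL)          # the eagerly imported SSL module
print(OpenSSL.crypto)       # likewise for crypto
```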
@@ -1,124 +0,0 @@
import os
import sys
import warnings
from typing import Any, Callable, NoReturn, Type, Union

from cryptography.hazmat.bindings.openssl.binding import Binding

StrOrBytesPath = Union[str, bytes, os.PathLike]

binding = Binding()
ffi = binding.ffi
lib = binding.lib


# This is a special CFFI allocator that does not bother to zero its memory
# after allocation. This has vastly better performance on large allocations and
# so should be used whenever we don't need the memory zeroed out.
no_zero_allocator = ffi.new_allocator(should_clear_after_alloc=False)


def text(charp: Any) -> str:
    """
    Get a native string type representing of the given CFFI ``char*`` object.

    :param charp: A C-style string represented using CFFI.

    :return: :class:`str`
    """
    if not charp:
        return ""
    return ffi.string(charp).decode("utf-8")


def exception_from_error_queue(exception_type: Type[Exception]) -> NoReturn:
    """
    Convert an OpenSSL library failure into a Python exception.

    When a call to the native OpenSSL library fails, this is usually signalled
    by the return value, and an error code is stored in an error queue
    associated with the current thread. The err library provides functions to
    obtain these error codes and textual error messages.
    """
    errors = []

    while True:
        error = lib.ERR_get_error()
        if error == 0:
            break
        errors.append(
            (
                text(lib.ERR_lib_error_string(error)),
                text(lib.ERR_func_error_string(error)),
                text(lib.ERR_reason_error_string(error)),
            )
        )

    raise exception_type(errors)


def make_assert(error: Type[Exception]) -> Callable[[bool], Any]:
    """
    Create an assert function that uses :func:`exception_from_error_queue` to
    raise an exception wrapped by *error*.
    """

    def openssl_assert(ok: bool) -> None:
        """
        If *ok* is not True, retrieve the error from OpenSSL and raise it.
        """
        if ok is not True:
            exception_from_error_queue(error)

    return openssl_assert


def path_bytes(s: StrOrBytesPath) -> bytes:
    """
    Convert a Python path to a :py:class:`bytes` for the path which can be
    passed into an OpenSSL API accepting a filename.

    :param s: A path (valid for os.fspath).

    :return: An instance of :py:class:`bytes`.
    """
    b = os.fspath(s)

    if isinstance(b, str):
        return b.encode(sys.getfilesystemencoding())
    else:
        return b


def byte_string(s: str) -> bytes:
    return s.encode("charmap")


# A marker object to observe whether some optional arguments are passed any
# value or not.
UNSPECIFIED = object()

_TEXT_WARNING = "str for {0} is no longer accepted, use bytes"


def text_to_bytes_and_warn(label: str, obj: Any) -> Any:
    """
    If ``obj`` is text, emit a warning that it should be bytes instead and try
    to convert it to bytes automatically.

    :param str label: The name of the parameter from which ``obj`` was taken
        (so a developer can easily find the source of the problem and correct
        it).

    :return: If ``obj`` is the text string type, a ``bytes`` object giving the
        UTF-8 encoding of that text is returned. Otherwise, ``obj`` itself is
        returned.
    """
    if isinstance(obj, str):
        warnings.warn(
            _TEXT_WARNING.format(label),
            category=DeprecationWarning,
            stacklevel=3,
        )
        return obj.encode("utf-8")
    return obj
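
As a hedged illustration of how ``make_assert`` is consumed (pyOpenSSL wires
this up internally for its own error types; the ``Error`` class below is a
stand-in for illustration):

```python
from OpenSSL._util import make_assert


class Error(Exception):
    """Stand-in for a module-level pyOpenSSL error type."""


# Each module binds one assert helper to its own error type; any argument
# other than True drains the OpenSSL error queue and raises `Error`.
openssl_assert = make_assert(Error)
openssl_assert(True)  # no-op on success
```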
File diff suppressed because it is too large
@@ -1,40 +0,0 @@
import ssl
import sys

import cffi
import cryptography

import OpenSSL.SSL

from . import version

_env_info = """\
pyOpenSSL: {pyopenssl}
cryptography: {cryptography}
cffi: {cffi}
cryptography's compiled against OpenSSL: {crypto_openssl_compile}
cryptography's linked OpenSSL: {crypto_openssl_link}
Python's OpenSSL: {python_openssl}
Python executable: {python}
Python version: {python_version}
Platform: {platform}
sys.path: {sys_path}""".format(
    pyopenssl=version.__version__,
    crypto_openssl_compile=OpenSSL._util.ffi.string(
        OpenSSL._util.lib.OPENSSL_VERSION_TEXT,
    ).decode("ascii"),
    crypto_openssl_link=OpenSSL.SSL.SSLeay_version(
        OpenSSL.SSL.SSLEAY_VERSION
    ).decode("ascii"),
    python_openssl=getattr(ssl, "OPENSSL_VERSION", "n/a"),
    cryptography=cryptography.__version__,
    cffi=cffi.__version__,
    python=sys.executable,
    python_version=sys.version,
    platform=sys.platform,
    sys_path=sys.path,
)


if __name__ == "__main__":
    print(_env_info)
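
A hedged note on usage: this module is meant to be run directly to print the
report above (``_env_info`` is a private attribute, so the import below is
illustrative only):

```python
# Equivalent to running `python -m OpenSSL.debug` at a shell prompt.
from OpenSSL.debug import _env_info

print(_env_info)
```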
@@ -1,40 +0,0 @@
"""
PRNG management routines, thin wrappers.
"""

from OpenSSL._util import lib as _lib


def add(buffer: bytes, entropy: int) -> None:
    """
    Mix bytes from *string* into the PRNG state.

    The *entropy* argument is (the lower bound of) an estimate of how much
    randomness is contained in *string*, measured in bytes.

    For more information, see e.g. :rfc:`1750`.

    This function is only relevant if you are forking Python processes and
    need to reseed the CSPRNG after fork.

    :param buffer: Buffer with random data.
    :param entropy: The entropy (in bytes) measurement of the buffer.

    :return: :obj:`None`
    """
    if not isinstance(buffer, bytes):
        raise TypeError("buffer must be a byte string")

    if not isinstance(entropy, int):
        raise TypeError("entropy must be an integer")

    _lib.RAND_add(buffer, len(buffer), entropy)


def status() -> int:
    """
    Check whether the PRNG has been seeded with enough data.

    :return: 1 if the PRNG is seeded enough, 0 otherwise.
    """
    return _lib.RAND_status()
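
A short, hedged usage sketch of these two wrappers (the 32-byte entropy
estimate is an arbitrary choice for the example):

```python
import os

from OpenSSL import rand

# Mix fresh OS randomness into the OpenSSL PRNG; the second argument is a
# lower-bound estimate, in bytes, of the entropy contained in the buffer.
rand.add(os.urandom(32), 32)
print(rand.status())  # 1 once the PRNG reports it is sufficiently seeded
```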
@@ -1,28 +0,0 @@
# Copyright (C) AB Strakt
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.

"""
pyOpenSSL - A simple wrapper around the OpenSSL library
"""

__all__ = [
    "__author__",
    "__copyright__",
    "__email__",
    "__license__",
    "__summary__",
    "__title__",
    "__uri__",
    "__version__",
]

__version__ = "24.2.1"

__title__ = "pyOpenSSL"
__uri__ = "https://pyopenssl.org/"
__summary__ = "Python wrapper module around the OpenSSL library"
__author__ = "The pyOpenSSL developers"
__email__ = "cryptography-dev@python.org"
__license__ = "Apache License, Version 2.0"
__copyright__ = f"Copyright 2001-2024 {__author__}"
@@ -1,133 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# bitmap distribution font (bdf) file parser
#
# history:
# 1996-05-16 fl   created (as bdf2pil)
# 1997-08-25 fl   converted to FontFile driver
# 2001-05-25 fl   removed bogus __init__ call
# 2002-11-20 fl   robustification (from Kevin Cazabon, Dmitry Vasiliev)
# 2003-04-22 fl   more robustification (from Graham Dumpleton)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#

"""
Parse X Bitmap Distribution Format (BDF)
"""
from __future__ import annotations

from typing import BinaryIO

from . import FontFile, Image

bdf_slant = {
    "R": "Roman",
    "I": "Italic",
    "O": "Oblique",
    "RI": "Reverse Italic",
    "RO": "Reverse Oblique",
    "OT": "Other",
}

bdf_spacing = {"P": "Proportional", "M": "Monospaced", "C": "Cell"}


def bdf_char(
    f: BinaryIO,
) -> (
    tuple[
        str,
        int,
        tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]],
        Image.Image,
    ]
    | None
):
    # skip to STARTCHAR
    while True:
        s = f.readline()
        if not s:
            return None
        if s[:9] == b"STARTCHAR":
            break
    id = s[9:].strip().decode("ascii")

    # load symbol properties
    props = {}
    while True:
        s = f.readline()
        if not s or s[:6] == b"BITMAP":
            break
        i = s.find(b" ")
        props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")

    # load bitmap
    bitmap = bytearray()
    while True:
        s = f.readline()
        if not s or s[:7] == b"ENDCHAR":
            break
        bitmap += s[:-1]

    # The word BBX
    # followed by the width in x (BBw), height in y (BBh),
    # and x and y displacement (BBxoff0, BByoff0)
    # of the lower left corner from the origin of the character.
    width, height, x_disp, y_disp = (int(p) for p in props["BBX"].split())

    # The word DWIDTH
    # followed by the width in x and y of the character in device pixels.
    dwx, dwy = (int(p) for p in props["DWIDTH"].split())

    bbox = (
        (dwx, dwy),
        (x_disp, -y_disp - height, width + x_disp, -y_disp),
        (0, 0, width, height),
    )

    try:
        im = Image.frombytes("1", (width, height), bitmap, "hex", "1")
    except ValueError:
        # deal with zero-width characters
        im = Image.new("1", (width, height))

    return id, int(props["ENCODING"]), bbox, im


class BdfFontFile(FontFile.FontFile):
    """Font file plugin for the X11 BDF format."""

    def __init__(self, fp: BinaryIO) -> None:
        super().__init__()

        s = fp.readline()
        if s[:13] != b"STARTFONT 2.1":
            msg = "not a valid BDF file"
            raise SyntaxError(msg)

        props = {}
        comments = []

        while True:
            s = fp.readline()
            if not s or s[:13] == b"ENDPROPERTIES":
                break
            i = s.find(b" ")
            props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")
            if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
                if s.find(b"LogicalFontDescription") < 0:
                    comments.append(s[i + 1 : -1].decode("ascii"))

        while True:
            c = bdf_char(fp)
            if not c:
                break
            id, ch, (xy, dst, src), im = c
            if 0 <= ch < len(self.glyph):
                self.glyph[ch] = xy, dst, src, im
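
A hedged sketch of driving this parser ("font.bdf" is a hypothetical input
path); ``FontFile.save`` compiles the parsed glyphs into the ``.pil`` metrics
file and ``.pbm`` bitmap pair that PIL's font machinery loads:

```python
from PIL import BdfFontFile

# Parse a BDF source font, then compile it to PIL's bitmap font format.
with open("font.bdf", "rb") as fp:
    font = BdfFontFile.BdfFontFile(fp)
font.save("font.pil")  # writes font.pil plus a font.pbm glyph bitmap
```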
@@ -1,493 +0,0 @@
"""
Blizzard Mipmap Format (.blp)
Jerome Leclanche <jerome@leclan.ch>

The contents of this file are hereby released in the public domain (CC0)
Full text of the CC0 license:
    https://creativecommons.org/publicdomain/zero/1.0/

BLP1 files, used mostly in Warcraft III, are not fully supported.
All types of BLP2 files used in World of Warcraft are supported.

The BLP file structure consists of a header, up to 16 mipmaps of the
texture

Texture sizes must be powers of two, though the two dimensions do
not have to be equal; 512x256 is valid, but 512x200 is not.
The first mipmap (mipmap #0) is the full size image; each subsequent
mipmap halves both dimensions. The final mipmap should be 1x1.

BLP files come in many different flavours:
* JPEG-compressed (type == 0) - only supported for BLP1.
* RAW images (type == 1, encoding == 1). Each mipmap is stored as an
  array of 8-bit values, one per pixel, left to right, top to bottom.
  Each value is an index to the palette.
* DXT-compressed (type == 1, encoding == 2):
  - DXT1 compression is used if alpha_encoding == 0.
    - An additional alpha bit is used if alpha_depth == 1.
  - DXT3 compression is used if alpha_encoding == 1.
  - DXT5 compression is used if alpha_encoding == 7.
"""

from __future__ import annotations

import abc
import os
import struct
from enum import IntEnum
from io import BytesIO
from typing import IO

from . import Image, ImageFile


class Format(IntEnum):
    JPEG = 0


class Encoding(IntEnum):
    UNCOMPRESSED = 1
    DXT = 2
    UNCOMPRESSED_RAW_BGRA = 3


class AlphaEncoding(IntEnum):
    DXT1 = 0
    DXT3 = 1
    DXT5 = 7


def unpack_565(i: int) -> tuple[int, int, int]:
    return ((i >> 11) & 0x1F) << 3, ((i >> 5) & 0x3F) << 2, (i & 0x1F) << 3
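# Worked example: unpack_565(0xFFFF) == (248, 252, 248). The 5-bit red and
# blue channels are shifted left by 3 and the 6-bit green channel by 2, so a
# fully saturated channel tops out at 248 (252 for green) rather than 255.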


def decode_dxt1(
    data: bytes, alpha: bool = False
) -> tuple[bytearray, bytearray, bytearray, bytearray]:
    """
    input: one "row" of data (i.e. will produce 4*width pixels)
    """

    blocks = len(data) // 8  # number of blocks in row
    ret = (bytearray(), bytearray(), bytearray(), bytearray())

    for block_index in range(blocks):
        # Decode next 8-byte block.
        idx = block_index * 8
        color0, color1, bits = struct.unpack_from("<HHI", data, idx)

        r0, g0, b0 = unpack_565(color0)
        r1, g1, b1 = unpack_565(color1)

        # Decode this block into 4x4 pixels
        # Accumulate the results onto our 4 row accumulators
        for j in range(4):
            for i in range(4):
                # get next control op and generate a pixel

                control = bits & 3
                bits = bits >> 2

                a = 0xFF
                if control == 0:
                    r, g, b = r0, g0, b0
                elif control == 1:
                    r, g, b = r1, g1, b1
                elif control == 2:
                    if color0 > color1:
                        r = (2 * r0 + r1) // 3
                        g = (2 * g0 + g1) // 3
                        b = (2 * b0 + b1) // 3
                    else:
                        r = (r0 + r1) // 2
                        g = (g0 + g1) // 2
                        b = (b0 + b1) // 2
                elif control == 3:
                    if color0 > color1:
                        r = (2 * r1 + r0) // 3
                        g = (2 * g1 + g0) // 3
                        b = (2 * b1 + b0) // 3
                    else:
                        r, g, b, a = 0, 0, 0, 0

                if alpha:
                    ret[j].extend([r, g, b, a])
                else:
                    ret[j].extend([r, g, b])

    return ret


def decode_dxt3(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
    """
    input: one "row" of data (i.e. will produce 4*width pixels)
    """

    blocks = len(data) // 16  # number of blocks in row
    ret = (bytearray(), bytearray(), bytearray(), bytearray())

    for block_index in range(blocks):
        idx = block_index * 16
        block = data[idx : idx + 16]
        # Decode next 16-byte block.
        bits = struct.unpack_from("<8B", block)
        color0, color1 = struct.unpack_from("<HH", block, 8)

        (code,) = struct.unpack_from("<I", block, 12)

        r0, g0, b0 = unpack_565(color0)
        r1, g1, b1 = unpack_565(color1)

        for j in range(4):
            high = False  # Do we want the higher bits?
            for i in range(4):
                alphacode_index = (4 * j + i) // 2
                a = bits[alphacode_index]
                if high:
                    high = False
                    a >>= 4
                else:
                    high = True
                    a &= 0xF
                a *= 17  # We get a value between 0 and 15

                color_code = (code >> 2 * (4 * j + i)) & 0x03

                if color_code == 0:
                    r, g, b = r0, g0, b0
                elif color_code == 1:
                    r, g, b = r1, g1, b1
                elif color_code == 2:
                    r = (2 * r0 + r1) // 3
                    g = (2 * g0 + g1) // 3
                    b = (2 * b0 + b1) // 3
                elif color_code == 3:
                    r = (2 * r1 + r0) // 3
                    g = (2 * g1 + g0) // 3
                    b = (2 * b1 + b0) // 3

                ret[j].extend([r, g, b, a])

    return ret


def decode_dxt5(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
    """
    input: one "row" of data (i.e. will produce 4 * width pixels)
    """

    blocks = len(data) // 16  # number of blocks in row
    ret = (bytearray(), bytearray(), bytearray(), bytearray())

    for block_index in range(blocks):
        idx = block_index * 16
        block = data[idx : idx + 16]
        # Decode next 16-byte block.
        a0, a1 = struct.unpack_from("<BB", block)

        bits = struct.unpack_from("<6B", block, 2)
        alphacode1 = bits[2] | (bits[3] << 8) | (bits[4] << 16) | (bits[5] << 24)
        alphacode2 = bits[0] | (bits[1] << 8)

        color0, color1 = struct.unpack_from("<HH", block, 8)

        (code,) = struct.unpack_from("<I", block, 12)

        r0, g0, b0 = unpack_565(color0)
        r1, g1, b1 = unpack_565(color1)

        for j in range(4):
            for i in range(4):
                # get next control op and generate a pixel
                alphacode_index = 3 * (4 * j + i)

                if alphacode_index <= 12:
                    alphacode = (alphacode2 >> alphacode_index) & 0x07
                elif alphacode_index == 15:
                    alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06)
                else:  # alphacode_index >= 18 and alphacode_index <= 45
                    alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07

                if alphacode == 0:
                    a = a0
                elif alphacode == 1:
                    a = a1
                elif a0 > a1:
                    a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7
                elif alphacode == 6:
                    a = 0
                elif alphacode == 7:
                    a = 255
                else:
                    a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5

                color_code = (code >> 2 * (4 * j + i)) & 0x03

                if color_code == 0:
                    r, g, b = r0, g0, b0
                elif color_code == 1:
                    r, g, b = r1, g1, b1
                elif color_code == 2:
                    r = (2 * r0 + r1) // 3
                    g = (2 * g0 + g1) // 3
                    b = (2 * b0 + b1) // 3
                elif color_code == 3:
                    r = (2 * r1 + r0) // 3
                    g = (2 * g1 + g0) // 3
                    b = (2 * b1 + b0) // 3

                ret[j].extend([r, g, b, a])

    return ret


class BLPFormatError(NotImplementedError):
    pass


def _accept(prefix: bytes) -> bool:
    return prefix[:4] in (b"BLP1", b"BLP2")


class BlpImageFile(ImageFile.ImageFile):
    """
    Blizzard Mipmap Format
    """

    format = "BLP"
    format_description = "Blizzard Mipmap Format"

    def _open(self) -> None:
        self.magic = self.fp.read(4)

        self.fp.seek(5, os.SEEK_CUR)
        (self._blp_alpha_depth,) = struct.unpack("<b", self.fp.read(1))

        self.fp.seek(2, os.SEEK_CUR)
        self._size = struct.unpack("<II", self.fp.read(8))

        if self.magic in (b"BLP1", b"BLP2"):
            decoder = self.magic.decode()
        else:
            msg = f"Bad BLP magic {repr(self.magic)}"
            raise BLPFormatError(msg)

        self._mode = "RGBA" if self._blp_alpha_depth else "RGB"
        self.tile = [ImageFile._Tile(decoder, (0, 0) + self.size, 0, (self.mode, 0, 1))]


class _BLPBaseDecoder(ImageFile.PyDecoder):
    _pulls_fd = True

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        try:
            self._read_blp_header()
            self._load()
        except struct.error as e:
            msg = "Truncated BLP file"
            raise OSError(msg) from e
        return -1, 0

    @abc.abstractmethod
    def _load(self) -> None:
        pass

    def _read_blp_header(self) -> None:
        assert self.fd is not None
        self.fd.seek(4)
        (self._blp_compression,) = struct.unpack("<i", self._safe_read(4))

        (self._blp_encoding,) = struct.unpack("<b", self._safe_read(1))
        (self._blp_alpha_depth,) = struct.unpack("<b", self._safe_read(1))
        (self._blp_alpha_encoding,) = struct.unpack("<b", self._safe_read(1))
        self.fd.seek(1, os.SEEK_CUR)  # mips

        self.size = struct.unpack("<II", self._safe_read(8))

        if isinstance(self, BLP1Decoder):
            # Only present for BLP1
            (self._blp_encoding,) = struct.unpack("<i", self._safe_read(4))
            self.fd.seek(4, os.SEEK_CUR)  # subtype

        self._blp_offsets = struct.unpack("<16I", self._safe_read(16 * 4))
        self._blp_lengths = struct.unpack("<16I", self._safe_read(16 * 4))

    def _safe_read(self, length: int) -> bytes:
        assert self.fd is not None
        return ImageFile._safe_read(self.fd, length)

    def _read_palette(self) -> list[tuple[int, int, int, int]]:
        ret = []
        for i in range(256):
            try:
                b, g, r, a = struct.unpack("<4B", self._safe_read(4))
            except struct.error:
                break
            ret.append((b, g, r, a))
        return ret

    def _read_bgra(self, palette: list[tuple[int, int, int, int]]) -> bytearray:
        data = bytearray()
        _data = BytesIO(self._safe_read(self._blp_lengths[0]))
        while True:
            try:
                (offset,) = struct.unpack("<B", _data.read(1))
            except struct.error:
                break
            b, g, r, a = palette[offset]
            d: tuple[int, ...] = (r, g, b)
            if self._blp_alpha_depth:
                d += (a,)
            data.extend(d)
        return data


class BLP1Decoder(_BLPBaseDecoder):
    def _load(self) -> None:
        if self._blp_compression == Format.JPEG:
            self._decode_jpeg_stream()

        elif self._blp_compression == 1:
            if self._blp_encoding in (4, 5):
                palette = self._read_palette()
                data = self._read_bgra(palette)
                self.set_as_raw(data)
            else:
                msg = f"Unsupported BLP encoding {repr(self._blp_encoding)}"
                raise BLPFormatError(msg)
        else:
            msg = f"Unsupported BLP compression {repr(self._blp_encoding)}"
            raise BLPFormatError(msg)

    def _decode_jpeg_stream(self) -> None:
        from .JpegImagePlugin import JpegImageFile

        (jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
        jpeg_header = self._safe_read(jpeg_header_size)
        assert self.fd is not None
        self._safe_read(self._blp_offsets[0] - self.fd.tell())  # What IS this?
        data = self._safe_read(self._blp_lengths[0])
        data = jpeg_header + data
        image = JpegImageFile(BytesIO(data))
        Image._decompression_bomb_check(image.size)
        if image.mode == "CMYK":
            decoder_name, extents, offset, args = image.tile[0]
            assert isinstance(args, tuple)
            image.tile = [
                ImageFile._Tile(decoder_name, extents, offset, (args[0], "CMYK"))
            ]
        r, g, b = image.convert("RGB").split()
        reversed_image = Image.merge("RGB", (b, g, r))
        self.set_as_raw(reversed_image.tobytes())


class BLP2Decoder(_BLPBaseDecoder):
    def _load(self) -> None:
        palette = self._read_palette()

        assert self.fd is not None
        self.fd.seek(self._blp_offsets[0])

        if self._blp_compression == 1:
            # Uncompressed or DirectX compression

            if self._blp_encoding == Encoding.UNCOMPRESSED:
                data = self._read_bgra(palette)

            elif self._blp_encoding == Encoding.DXT:
                data = bytearray()
                if self._blp_alpha_encoding == AlphaEncoding.DXT1:
                    linesize = (self.size[0] + 3) // 4 * 8
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt1(
                            self._safe_read(linesize), alpha=bool(self._blp_alpha_depth)
                        ):
                            data += d

                elif self._blp_alpha_encoding == AlphaEncoding.DXT3:
                    linesize = (self.size[0] + 3) // 4 * 16
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt3(self._safe_read(linesize)):
                            data += d

                elif self._blp_alpha_encoding == AlphaEncoding.DXT5:
                    linesize = (self.size[0] + 3) // 4 * 16
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt5(self._safe_read(linesize)):
                            data += d
                else:
                    msg = f"Unsupported alpha encoding {repr(self._blp_alpha_encoding)}"
                    raise BLPFormatError(msg)
            else:
                msg = f"Unknown BLP encoding {repr(self._blp_encoding)}"
                raise BLPFormatError(msg)

        else:
            msg = f"Unknown BLP compression {repr(self._blp_compression)}"
            raise BLPFormatError(msg)

        self.set_as_raw(data)


class BLPEncoder(ImageFile.PyEncoder):
    _pushes_fd = True

    def _write_palette(self) -> bytes:
        data = b""
        assert self.im is not None
        palette = self.im.getpalette("RGBA", "RGBA")
        for i in range(len(palette) // 4):
            r, g, b, a = palette[i * 4 : (i + 1) * 4]
            data += struct.pack("<4B", b, g, r, a)
        while len(data) < 256 * 4:
            data += b"\x00" * 4
        return data

    def encode(self, bufsize: int) -> tuple[int, int, bytes]:
        palette_data = self._write_palette()

        offset = 20 + 16 * 4 * 2 + len(palette_data)
        data = struct.pack("<16I", offset, *((0,) * 15))

        assert self.im is not None
        w, h = self.im.size
        data += struct.pack("<16I", w * h, *((0,) * 15))

        data += palette_data

        for y in range(h):
            for x in range(w):
                data += struct.pack("<B", self.im.getpixel((x, y)))

        return len(data), 0, data


def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    if im.mode != "P":
        msg = "Unsupported BLP image mode"
        raise ValueError(msg)

    magic = b"BLP1" if im.encoderinfo.get("blp_version") == "BLP1" else b"BLP2"
    fp.write(magic)

    assert im.palette is not None
    fp.write(struct.pack("<i", 1))  # Uncompressed or DirectX compression
    fp.write(struct.pack("<b", Encoding.UNCOMPRESSED))
    fp.write(struct.pack("<b", 1 if im.palette.mode == "RGBA" else 0))
    fp.write(struct.pack("<b", 0))  # alpha encoding
    fp.write(struct.pack("<b", 0))  # mips
    fp.write(struct.pack("<II", *im.size))
    if magic == b"BLP1":
        fp.write(struct.pack("<i", 5))
        fp.write(struct.pack("<i", 0))

    ImageFile._save(im, fp, [ImageFile._Tile("BLP", (0, 0) + im.size, 0, im.mode)])


Image.register_open(BlpImageFile.format, BlpImageFile, _accept)
Image.register_extension(BlpImageFile.format, ".blp")
Image.register_decoder("BLP1", BLP1Decoder)
Image.register_decoder("BLP2", BLP2Decoder)

Image.register_save(BlpImageFile.format, _save)
Image.register_encoder("BLP", BLPEncoder)
@@ -1,511 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# BMP file handler
#
# Windows (and OS/2) native bitmap storage format.
#
# history:
# 1995-09-01 fl   Created
# 1996-04-30 fl   Added save
# 1997-08-27 fl   Fixed save of 1-bit images
# 1998-03-06 fl   Load P images as L where possible
# 1998-07-03 fl   Load P images as 1 where possible
# 1998-12-29 fl   Handle small palettes
# 2002-12-30 fl   Fixed load of 1-bit palette images
# 2003-04-21 fl   Fixed load of 1-bit monochrome images
# 2003-04-23 fl   Added limited support for BI_BITFIELDS compression
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import os
from typing import IO, Any

from . import Image, ImageFile, ImagePalette
from ._binary import i16le as i16
from ._binary import i32le as i32
from ._binary import o8
from ._binary import o16le as o16
from ._binary import o32le as o32

#
# --------------------------------------------------------------------
# Read BMP file

BIT2MODE = {
    # bits => mode, rawmode
    1: ("P", "P;1"),
    4: ("P", "P;4"),
    8: ("P", "P"),
    16: ("RGB", "BGR;15"),
    24: ("RGB", "BGR"),
    32: ("RGB", "BGRX"),
}


def _accept(prefix: bytes) -> bool:
    return prefix[:2] == b"BM"


def _dib_accept(prefix: bytes) -> bool:
    return i32(prefix) in [12, 40, 52, 56, 64, 108, 124]


# =============================================================================
# Image plugin for the Windows BMP format.
# =============================================================================
class BmpImageFile(ImageFile.ImageFile):
    """Image plugin for the Windows Bitmap format (BMP)"""

    # ------------------------------------------------------------- Description
    format_description = "Windows Bitmap"
    format = "BMP"

    # -------------------------------------------------- BMP Compression values
    COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5}
    for k, v in COMPRESSIONS.items():
        vars()[k] = v

    def _bitmap(self, header: int = 0, offset: int = 0) -> None:
        """Read relevant info about the BMP"""
        read, seek = self.fp.read, self.fp.seek
        if header:
            seek(header)
        # read bmp header size @offset 14 (this is part of the header size)
        file_info: dict[str, bool | int | tuple[int, ...]] = {
            "header_size": i32(read(4)),
            "direction": -1,
        }

        # -------------------- If requested, read header at a specific position
        # read the rest of the bmp header, without its size
        assert isinstance(file_info["header_size"], int)
        header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4)

        # ------------------------------- Windows Bitmap v2, IBM OS/2 Bitmap v1
        # ----- This format has different offsets because of width/height types
        # 12: BITMAPCOREHEADER/OS21XBITMAPHEADER
        if file_info["header_size"] == 12:
            file_info["width"] = i16(header_data, 0)
            file_info["height"] = i16(header_data, 2)
            file_info["planes"] = i16(header_data, 4)
            file_info["bits"] = i16(header_data, 6)
            file_info["compression"] = self.COMPRESSIONS["RAW"]
            file_info["palette_padding"] = 3

        # --------------------------------------------- Windows Bitmap v3 to v5
        # 40: BITMAPINFOHEADER
        # 52: BITMAPV2HEADER
        # 56: BITMAPV3HEADER
        # 64: BITMAPCOREHEADER2/OS22XBITMAPHEADER
        # 108: BITMAPV4HEADER
        # 124: BITMAPV5HEADER
        elif file_info["header_size"] in (40, 52, 56, 64, 108, 124):
            file_info["y_flip"] = header_data[7] == 0xFF
            file_info["direction"] = 1 if file_info["y_flip"] else -1
            file_info["width"] = i32(header_data, 0)
            file_info["height"] = (
                i32(header_data, 4)
                if not file_info["y_flip"]
                else 2**32 - i32(header_data, 4)
            )
            file_info["planes"] = i16(header_data, 8)
            file_info["bits"] = i16(header_data, 10)
            file_info["compression"] = i32(header_data, 12)
            # byte size of pixel data
            file_info["data_size"] = i32(header_data, 16)
            file_info["pixels_per_meter"] = (
                i32(header_data, 20),
                i32(header_data, 24),
            )
            file_info["colors"] = i32(header_data, 28)
            file_info["palette_padding"] = 4
            assert isinstance(file_info["pixels_per_meter"], tuple)
            self.info["dpi"] = tuple(x / 39.3701 for x in file_info["pixels_per_meter"])
            if file_info["compression"] == self.COMPRESSIONS["BITFIELDS"]:
                masks = ["r_mask", "g_mask", "b_mask"]
                if len(header_data) >= 48:
                    if len(header_data) >= 52:
                        masks.append("a_mask")
                    else:
                        file_info["a_mask"] = 0x0
                    for idx, mask in enumerate(masks):
                        file_info[mask] = i32(header_data, 36 + idx * 4)
                else:
                    # 40 byte headers only have the three components in the
                    # bitfields masks, ref:
                    # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx
                    # See also
                    # https://github.com/python-pillow/Pillow/issues/1293
                    # There is a 4th component in the RGBQuad, in the alpha
                    # location, but it is listed as a reserved component,
                    # and it is not generally an alpha channel
                    file_info["a_mask"] = 0x0
                    for mask in masks:
                        file_info[mask] = i32(read(4))
                assert isinstance(file_info["r_mask"], int)
                assert isinstance(file_info["g_mask"], int)
                assert isinstance(file_info["b_mask"], int)
                assert isinstance(file_info["a_mask"], int)
                file_info["rgb_mask"] = (
                    file_info["r_mask"],
                    file_info["g_mask"],
                    file_info["b_mask"],
                )
                file_info["rgba_mask"] = (
                    file_info["r_mask"],
                    file_info["g_mask"],
                    file_info["b_mask"],
                    file_info["a_mask"],
                )
        else:
            msg = f"Unsupported BMP header type ({file_info['header_size']})"
            raise OSError(msg)

        # ------------------ Special case : header is reported 40, which
        # ---------------------- is shorter than real size for bpp >= 16
        assert isinstance(file_info["width"], int)
        assert isinstance(file_info["height"], int)
        self._size = file_info["width"], file_info["height"]

        # ------- If color count was not found in the header, compute from bits
        assert isinstance(file_info["bits"], int)
        file_info["colors"] = (
            file_info["colors"]
            if file_info.get("colors", 0)
            else (1 << file_info["bits"])
        )
        assert isinstance(file_info["colors"], int)
        if offset == 14 + file_info["header_size"] and file_info["bits"] <= 8:
            offset += 4 * file_info["colors"]

        # ---------------------- Check bit depth for unusual unsupported values
        self._mode, raw_mode = BIT2MODE.get(file_info["bits"], ("", ""))
        if not self.mode:
            msg = f"Unsupported BMP pixel depth ({file_info['bits']})"
            raise OSError(msg)

        # ---------------- Process BMP with Bitfields compression (not palette)
        decoder_name = "raw"
        if file_info["compression"] == self.COMPRESSIONS["BITFIELDS"]:
            SUPPORTED: dict[int, list[tuple[int, ...]]] = {
                32: [
                    (0xFF0000, 0xFF00, 0xFF, 0x0),
                    (0xFF000000, 0xFF0000, 0xFF00, 0x0),
                    (0xFF000000, 0xFF00, 0xFF, 0x0),
                    (0xFF000000, 0xFF0000, 0xFF00, 0xFF),
                    (0xFF, 0xFF00, 0xFF0000, 0xFF000000),
                    (0xFF0000, 0xFF00, 0xFF, 0xFF000000),
                    (0xFF000000, 0xFF00, 0xFF, 0xFF0000),
                    (0x0, 0x0, 0x0, 0x0),
                ],
                24: [(0xFF0000, 0xFF00, 0xFF)],
                16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],
            }
            MASK_MODES = {
                (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX",
                (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR",
                (32, (0xFF000000, 0xFF00, 0xFF, 0x0)): "BGXR",
                (32, (0xFF000000, 0xFF0000, 0xFF00, 0xFF)): "ABGR",
                (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA",
                (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA",
                (32, (0xFF000000, 0xFF00, 0xFF, 0xFF0000)): "BGAR",
                (32, (0x0, 0x0, 0x0, 0x0)): "BGRA",
                (24, (0xFF0000, 0xFF00, 0xFF)): "BGR",
                (16, (0xF800, 0x7E0, 0x1F)): "BGR;16",
|
||||
(16, (0x7C00, 0x3E0, 0x1F)): "BGR;15",
|
||||
}
|
||||
if file_info["bits"] in SUPPORTED:
|
||||
if (
|
||||
file_info["bits"] == 32
|
||||
and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]]
|
||||
):
|
||||
assert isinstance(file_info["rgba_mask"], tuple)
|
||||
raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])]
|
||||
self._mode = "RGBA" if "A" in raw_mode else self.mode
|
||||
elif (
|
||||
file_info["bits"] in (24, 16)
|
||||
and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]]
|
||||
):
|
||||
assert isinstance(file_info["rgb_mask"], tuple)
|
||||
raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])]
|
||||
else:
|
||||
msg = "Unsupported BMP bitfields layout"
|
||||
raise OSError(msg)
|
||||
else:
|
||||
msg = "Unsupported BMP bitfields layout"
|
||||
raise OSError(msg)
|
||||
elif file_info["compression"] == self.COMPRESSIONS["RAW"]:
|
||||
if file_info["bits"] == 32 and header == 22: # 32-bit .cur offset
|
||||
raw_mode, self._mode = "BGRA", "RGBA"
|
||||
elif file_info["compression"] in (
|
||||
self.COMPRESSIONS["RLE8"],
|
||||
self.COMPRESSIONS["RLE4"],
|
||||
):
|
||||
decoder_name = "bmp_rle"
|
||||
else:
|
||||
msg = f"Unsupported BMP compression ({file_info['compression']})"
|
||||
raise OSError(msg)
|
||||
|
||||
# --------------- Once the header is processed, process the palette/LUT
|
||||
if self.mode == "P": # Paletted for 1, 4 and 8 bit images
|
||||
# ---------------------------------------------------- 1-bit images
|
||||
if not (0 < file_info["colors"] <= 65536):
|
||||
msg = f"Unsupported BMP Palette size ({file_info['colors']})"
|
||||
raise OSError(msg)
|
||||
else:
|
||||
assert isinstance(file_info["palette_padding"], int)
|
||||
padding = file_info["palette_padding"]
|
||||
palette = read(padding * file_info["colors"])
|
||||
grayscale = True
|
||||
indices = (
|
||||
(0, 255)
|
||||
if file_info["colors"] == 2
|
||||
else list(range(file_info["colors"]))
|
||||
)
|
||||
|
||||
# ----------------- Check if grayscale and ignore palette if so
|
||||
for ind, val in enumerate(indices):
|
||||
rgb = palette[ind * padding : ind * padding + 3]
|
||||
if rgb != o8(val) * 3:
|
||||
grayscale = False
|
||||
|
||||
# ------- If all colors are gray, white or black, ditch palette
|
||||
if grayscale:
|
||||
self._mode = "1" if file_info["colors"] == 2 else "L"
|
||||
raw_mode = self.mode
|
||||
else:
|
||||
self._mode = "P"
|
||||
self.palette = ImagePalette.raw(
|
||||
"BGRX" if padding == 4 else "BGR", palette
|
||||
)
|
||||
|
||||
# ---------------------------- Finally set the tile data for the plugin
|
||||
self.info["compression"] = file_info["compression"]
|
||||
args: list[Any] = [raw_mode]
|
||||
if decoder_name == "bmp_rle":
|
||||
args.append(file_info["compression"] == self.COMPRESSIONS["RLE4"])
|
||||
else:
|
||||
assert isinstance(file_info["width"], int)
|
||||
args.append(((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3))
|
||||
args.append(file_info["direction"])
|
||||
self.tile = [
|
||||
ImageFile._Tile(
|
||||
decoder_name,
|
||||
(0, 0, file_info["width"], file_info["height"]),
|
||||
offset or self.fp.tell(),
|
||||
tuple(args),
|
||||
)
|
||||
]
|
||||
|
||||
def _open(self) -> None:
|
||||
"""Open file, check magic number and read header"""
|
||||
# read 14 bytes: magic number, filesize, reserved, header final offset
|
||||
head_data = self.fp.read(14)
|
||||
# choke if the file does not have the required magic bytes
|
||||
if not _accept(head_data):
|
||||
msg = "Not a BMP file"
|
||||
raise SyntaxError(msg)
|
||||
# read the start position of the BMP image data (u32)
|
||||
offset = i32(head_data, 10)
|
||||
# load bitmap information (offset=raster info)
|
||||
self._bitmap(offset=offset)
|
||||
|
||||
|
||||
class BmpRleDecoder(ImageFile.PyDecoder):
|
||||
_pulls_fd = True
|
||||
|
||||
def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
|
||||
assert self.fd is not None
|
||||
rle4 = self.args[1]
|
||||
data = bytearray()
|
||||
x = 0
|
||||
dest_length = self.state.xsize * self.state.ysize
|
||||
while len(data) < dest_length:
|
||||
pixels = self.fd.read(1)
|
||||
byte = self.fd.read(1)
|
||||
if not pixels or not byte:
|
||||
break
|
||||
num_pixels = pixels[0]
|
||||
if num_pixels:
|
||||
# encoded mode
|
||||
if x + num_pixels > self.state.xsize:
|
||||
# Too much data for row
|
||||
num_pixels = max(0, self.state.xsize - x)
|
||||
if rle4:
|
||||
first_pixel = o8(byte[0] >> 4)
|
||||
second_pixel = o8(byte[0] & 0x0F)
|
||||
for index in range(num_pixels):
|
||||
if index % 2 == 0:
|
||||
data += first_pixel
|
||||
else:
|
||||
data += second_pixel
|
||||
else:
|
||||
data += byte * num_pixels
|
||||
x += num_pixels
|
||||
else:
|
||||
if byte[0] == 0:
|
||||
# end of line
|
||||
while len(data) % self.state.xsize != 0:
|
||||
data += b"\x00"
|
||||
x = 0
|
||||
elif byte[0] == 1:
|
||||
# end of bitmap
|
||||
break
|
||||
elif byte[0] == 2:
|
||||
# delta
|
||||
bytes_read = self.fd.read(2)
|
||||
if len(bytes_read) < 2:
|
||||
break
|
||||
right, up = self.fd.read(2)
|
||||
data += b"\x00" * (right + up * self.state.xsize)
|
||||
x = len(data) % self.state.xsize
|
||||
else:
|
||||
# absolute mode
|
||||
if rle4:
|
||||
# 2 pixels per byte
|
||||
byte_count = byte[0] // 2
|
||||
bytes_read = self.fd.read(byte_count)
|
||||
for byte_read in bytes_read:
|
||||
data += o8(byte_read >> 4)
|
||||
data += o8(byte_read & 0x0F)
|
||||
else:
|
||||
byte_count = byte[0]
|
||||
bytes_read = self.fd.read(byte_count)
|
||||
data += bytes_read
|
||||
if len(bytes_read) < byte_count:
|
||||
break
|
||||
x += byte[0]
|
||||
|
||||
# align to 16-bit word boundary
|
||||
if self.fd.tell() % 2 != 0:
|
||||
self.fd.seek(1, os.SEEK_CUR)
|
||||
rawmode = "L" if self.mode == "L" else "P"
|
||||
self.set_as_raw(bytes(data), rawmode, (0, self.args[-1]))
|
||||
return -1, 0
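
# A worked example of the RLE escape codes handled by BmpRleDecoder above
# (an illustrative sketch, not part of the original module). For an RLE8
# stream the decoder consumes byte pairs as follows:
#
#   b"\x03\x1f"              encoded mode: repeat palette index 0x1F 3 times
#   b"\x00\x04\x41\x42\x43\x44"
#                            absolute mode: copy 4 indices verbatim (a pad
#                            byte follows when the run ends on an odd offset)
#   b"\x00\x02\x05\x01"      delta: skip 5 pixels right and 1 row, zero-filled
#   b"\x00\x00"              end of line: zero-fill the remainder of the row
#   b"\x00\x01"              end of bitmap: stop decoding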


# =============================================================================
# Image plugin for the DIB format (BMP alias)
# =============================================================================
class DibImageFile(BmpImageFile):
    format = "DIB"
    format_description = "Windows Bitmap"

    def _open(self) -> None:
        self._bitmap()


#
# --------------------------------------------------------------------
# Write BMP file


SAVE = {
    "1": ("1", 1, 2),
    "L": ("L", 8, 256),
    "P": ("P", 8, 256),
    "RGB": ("BGR", 24, 0),
    "RGBA": ("BGRA", 32, 0),
}


def _dib_save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    _save(im, fp, filename, False)


def _save(
    im: Image.Image, fp: IO[bytes], filename: str | bytes, bitmap_header: bool = True
) -> None:
    try:
        rawmode, bits, colors = SAVE[im.mode]
    except KeyError as e:
        msg = f"cannot write mode {im.mode} as BMP"
        raise OSError(msg) from e

    info = im.encoderinfo

    dpi = info.get("dpi", (96, 96))

    # 1 meter == 39.3701 inches
    ppm = tuple(int(x * 39.3701 + 0.5) for x in dpi)

    stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3)
    header = 40  # or 64 for OS/2 version 2
    image = stride * im.size[1]

    if im.mode == "1":
        palette = b"".join(o8(i) * 4 for i in (0, 255))
    elif im.mode == "L":
        palette = b"".join(o8(i) * 4 for i in range(256))
    elif im.mode == "P":
        palette = im.im.getpalette("RGB", "BGRX")
        colors = len(palette) // 4
    else:
        palette = None

    # bitmap header
    if bitmap_header:
        offset = 14 + header + colors * 4
        file_size = offset + image
        if file_size > 2**32 - 1:
            msg = "File size is too large for the BMP format"
            raise ValueError(msg)
        fp.write(
            b"BM"  # file type (magic)
            + o32(file_size)  # file size
            + o32(0)  # reserved
            + o32(offset)  # image data offset
        )

    # bitmap info header
    fp.write(
        o32(header)  # info header size
        + o32(im.size[0])  # width
        + o32(im.size[1])  # height
        + o16(1)  # planes
        + o16(bits)  # depth
        + o32(0)  # compression (0=uncompressed)
        + o32(image)  # size of bitmap
        + o32(ppm[0])  # resolution
        + o32(ppm[1])  # resolution
        + o32(colors)  # colors used
        + o32(colors)  # colors important
    )

    fp.write(b"\0" * (header - 40))  # padding (for OS/2 format)

    if palette:
        fp.write(palette)

    ImageFile._save(
        im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))]
    )


#
# --------------------------------------------------------------------
# Registry


Image.register_open(BmpImageFile.format, BmpImageFile, _accept)
Image.register_save(BmpImageFile.format, _save)

Image.register_extension(BmpImageFile.format, ".bmp")

Image.register_mime(BmpImageFile.format, "image/bmp")

Image.register_decoder("bmp_rle", BmpRleDecoder)

Image.register_open(DibImageFile.format, DibImageFile, _dib_accept)
Image.register_save(DibImageFile.format, _dib_save)

Image.register_extension(DibImageFile.format, ".dib")

Image.register_mime(DibImageFile.format, "image/bmp")
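
# A minimal usage sketch for the BMP plugin registered above (illustrative
# only; the file names are hypothetical):
#
#     from PIL import Image
#
#     with Image.open("flag.bmp") as im:
#         print(im.format, im.mode, im.size, im.info.get("compression"))
#         # dpi is round-tripped through the pixels-per-meter header fields
#         im.save("flag_out.bmp", dpi=(300, 300))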
@@ -1,76 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# BUFR stub adapter
#
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from typing import IO

from . import Image, ImageFile

_handler = None


def register_handler(handler: ImageFile.StubHandler | None) -> None:
    """
    Install application-specific BUFR image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler


# --------------------------------------------------------------------
# Image adapter


def _accept(prefix: bytes) -> bool:
    return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC"


class BufrStubImageFile(ImageFile.StubImageFile):
    format = "BUFR"
    format_description = "BUFR"

    def _open(self) -> None:
        offset = self.fp.tell()

        if not _accept(self.fp.read(4)):
            msg = "Not a BUFR file"
            raise SyntaxError(msg)

        self.fp.seek(offset)

        # make something up
        self._mode = "F"
        self._size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self) -> ImageFile.StubHandler | None:
        return _handler


def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    if _handler is None or not hasattr(_handler, "save"):
        msg = "BUFR save handler not installed"
        raise OSError(msg)
    _handler.save(im, fp, filename)


# --------------------------------------------------------------------
# Registry

Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
Image.register_save(BufrStubImageFile.format, _save)

Image.register_extension(BufrStubImageFile.format, ".bufr")
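
# Sketch of installing an application-specific handler for this stub codec
# (illustrative; MyBufrHandler and its decoding logic are hypothetical):
#
#     from PIL import BufrStubImagePlugin, Image, ImageFile
#
#     class MyBufrHandler(ImageFile.StubHandler):
#         def open(self, im: ImageFile.StubImageFile) -> None:
#             pass  # e.g. parse the message and set im._size / im._mode
#
#         def load(self, im: ImageFile.StubImageFile) -> Image.Image:
#             raise NotImplementedError  # return the decoded Image here
#
#     BufrStubImagePlugin.register_handler(MyBufrHandler())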
@@ -1,173 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# a class to read from a container file
#
# History:
# 1995-06-18 fl   Created
# 1995-09-07 fl   Added readline(), readlines()
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import io
from collections.abc import Iterable
from typing import IO, AnyStr, NoReturn


class ContainerIO(IO[AnyStr]):
    """
    A file object that provides read access to a part of an existing
    file (for example a TAR file).
    """

    def __init__(self, file: IO[AnyStr], offset: int, length: int) -> None:
        """
        Create file object.

        :param file: Existing file.
        :param offset: Start of region, in bytes.
        :param length: Size of region, in bytes.
        """
        self.fh: IO[AnyStr] = file
        self.pos = 0
        self.offset = offset
        self.length = length
        self.fh.seek(offset)

    ##
    # Always false.

    def isatty(self) -> bool:
        return False

    def seekable(self) -> bool:
        return True

    def seek(self, offset: int, mode: int = io.SEEK_SET) -> int:
        """
        Move file pointer.

        :param offset: Offset in bytes.
        :param mode: Starting position. Use 0 for beginning of region, 1
            for current offset, and 2 for end of region. You cannot move
            the pointer outside the defined region.
        :returns: Offset from start of region, in bytes.
        """
        if mode == 1:
            self.pos = self.pos + offset
        elif mode == 2:
            self.pos = self.length + offset
        else:
            self.pos = offset
        # clamp
        self.pos = max(0, min(self.pos, self.length))
        self.fh.seek(self.offset + self.pos)
        return self.pos

    def tell(self) -> int:
        """
        Get current file pointer.

        :returns: Offset from start of region, in bytes.
        """
        return self.pos

    def readable(self) -> bool:
        return True

    def read(self, n: int = -1) -> AnyStr:
        """
        Read data.

        :param n: Number of bytes to read. If omitted, zero or negative,
            read until end of region.
        :returns: An 8-bit string.
        """
        if n > 0:
            n = min(n, self.length - self.pos)
        else:
            n = self.length - self.pos
        if n <= 0:  # EOF
            return b"" if "b" in self.fh.mode else ""  # type: ignore[return-value]
        self.pos = self.pos + n
        return self.fh.read(n)

    def readline(self, n: int = -1) -> AnyStr:
        """
        Read a line of text.

        :param n: Number of bytes to read. If omitted, zero or negative,
            read until end of line.
        :returns: An 8-bit string.
        """
        s: AnyStr = b"" if "b" in self.fh.mode else ""  # type: ignore[assignment]
        newline_character = b"\n" if "b" in self.fh.mode else "\n"
        while True:
            c = self.read(1)
            if not c:
                break
            s = s + c
            if c == newline_character or len(s) == n:
                break
        return s

    def readlines(self, n: int | None = -1) -> list[AnyStr]:
        """
        Read multiple lines of text.

        :param n: Number of lines to read. If omitted, zero, negative or None,
            read until end of region.
        :returns: A list of 8-bit strings.
        """
        lines = []
        while True:
            s = self.readline()
            if not s:
                break
            lines.append(s)
            if len(lines) == n:
                break
        return lines

    def writable(self) -> bool:
        return False

    def write(self, b: AnyStr) -> NoReturn:
        raise NotImplementedError()

    def writelines(self, lines: Iterable[AnyStr]) -> NoReturn:
        raise NotImplementedError()

    def truncate(self, size: int | None = None) -> int:
        raise NotImplementedError()

    def __enter__(self) -> ContainerIO[AnyStr]:
        return self

    def __exit__(self, *args: object) -> None:
        self.close()

    def __iter__(self) -> ContainerIO[AnyStr]:
        return self

    def __next__(self) -> AnyStr:
        line = self.readline()
        if not line:
            msg = "end of region"
            raise StopIteration(msg)
        return line

    def fileno(self) -> int:
        return self.fh.fileno()

    def flush(self) -> None:
        self.fh.flush()

    def close(self) -> None:
        self.fh.close()
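
# Usage sketch: expose a byte region of a larger file as a read-only file
# object (illustrative; the file name and region bounds are made up):
#
#     from PIL import ContainerIO
#
#     with open("archive.bin", "rb") as fh:
#         region = ContainerIO.ContainerIO(fh, 512, 1024)
#         header = region.read(16)  # reads are clamped to the region
#         region.seek(0)            # offsets are relative to the region start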
@@ -1,75 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# Windows Cursor support for PIL
#
# notes:
#       uses BmpImagePlugin.py to read the bitmap data.
#
# history:
#       96-05-27 fl     Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from . import BmpImagePlugin, Image, ImageFile
from ._binary import i16le as i16
from ._binary import i32le as i32

#
# --------------------------------------------------------------------


def _accept(prefix: bytes) -> bool:
    return prefix[:4] == b"\0\0\2\0"


##
# Image plugin for Windows Cursor files.


class CurImageFile(BmpImagePlugin.BmpImageFile):
    format = "CUR"
    format_description = "Windows Cursor"

    def _open(self) -> None:
        offset = self.fp.tell()

        # check magic
        s = self.fp.read(6)
        if not _accept(s):
            msg = "not a CUR file"
            raise SyntaxError(msg)

        # pick the largest cursor in the file
        m = b""
        for i in range(i16(s, 4)):
            s = self.fp.read(16)
            if not m:
                m = s
            elif s[0] > m[0] and s[1] > m[1]:
                m = s
        if not m:
            msg = "No cursors were found"
            raise TypeError(msg)

        # load as bitmap
        self._bitmap(i32(m, 12) + offset)

        # patch up the bitmap height
        self._size = self.size[0], self.size[1] // 2
        d, e, o, a = self.tile[0]
        self.tile[0] = ImageFile._Tile(d, (0, 0) + self.size, o, a)


#
# --------------------------------------------------------------------

Image.register_open(CurImageFile.format, CurImageFile, _accept)

Image.register_extension(CurImageFile.format, ".cur")
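
# Usage sketch (illustrative; the file name is hypothetical). The plugin
# keeps the largest cursor in the file and halves the stored height because
# the bitmap stacks the XOR and AND masks vertically:
#
#     from PIL import Image
#
#     with Image.open("pointer.cur") as cur:
#         print(cur.format, cur.size)  # e.g. CUR (32, 32)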
@@ -1,80 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# DCX file handling
#
# DCX is a container file format defined by Intel, commonly used
# for fax applications. Each DCX file consists of a directory
# (a list of file offsets) followed by a set of (usually 1-bit)
# PCX files.
#
# History:
# 1995-09-09 fl   Created
# 1996-03-20 fl   Properly derived from PcxImageFile.
# 1998-07-15 fl   Renamed offset attribute to avoid name clash
# 2002-07-30 fl   Fixed file handling
#
# Copyright (c) 1997-98 by Secret Labs AB.
# Copyright (c) 1995-96 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

from . import Image
from ._binary import i32le as i32
from .PcxImagePlugin import PcxImageFile

MAGIC = 0x3ADE68B1  # QUIZ: what's this value, then?


def _accept(prefix: bytes) -> bool:
    return len(prefix) >= 4 and i32(prefix) == MAGIC


##
# Image plugin for the Intel DCX format.


class DcxImageFile(PcxImageFile):
    format = "DCX"
    format_description = "Intel DCX"
    _close_exclusive_fp_after_loading = False

    def _open(self) -> None:
        # Header
        s = self.fp.read(4)
        if not _accept(s):
            msg = "not a DCX file"
            raise SyntaxError(msg)

        # Component directory
        self._offset = []
        for i in range(1024):
            offset = i32(self.fp.read(4))
            if not offset:
                break
            self._offset.append(offset)

        self._fp = self.fp
        self.frame = -1
        self.n_frames = len(self._offset)
        self.is_animated = self.n_frames > 1
        self.seek(0)

    def seek(self, frame: int) -> None:
        if not self._seek_check(frame):
            return
        self.frame = frame
        self.fp = self._fp
        self.fp.seek(self._offset[frame])
        PcxImageFile._open(self)

    def tell(self) -> int:
        return self.frame


Image.register_open(DcxImageFile.format, DcxImageFile, _accept)

Image.register_extension(DcxImageFile.format, ".dcx")
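
# Usage sketch for the multi-frame reader above (illustrative; the file
# name is hypothetical):
#
#     from PIL import Image
#
#     with Image.open("fax.dcx") as im:
#         for frame in range(im.n_frames):
#             im.seek(frame)  # re-runs PcxImageFile._open at that offset
#             im.load()
#         print(im.tell())    # index of the last frame decoded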
@@ -1,575 +0,0 @@
"""
A Pillow loader for .dds files (S3TC-compressed aka DXTC)
Jerome Leclanche <jerome@leclan.ch>

Documentation:
https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt

The contents of this file are hereby released in the public domain (CC0)
Full text of the CC0 license:
https://creativecommons.org/publicdomain/zero/1.0/
"""

from __future__ import annotations

import io
import struct
import sys
from enum import IntEnum, IntFlag
from typing import IO

from . import Image, ImageFile, ImagePalette
from ._binary import i32le as i32
from ._binary import o8
from ._binary import o32le as o32

# Magic ("DDS ")
DDS_MAGIC = 0x20534444


# DDS flags
class DDSD(IntFlag):
    CAPS = 0x1
    HEIGHT = 0x2
    WIDTH = 0x4
    PITCH = 0x8
    PIXELFORMAT = 0x1000
    MIPMAPCOUNT = 0x20000
    LINEARSIZE = 0x80000
    DEPTH = 0x800000


# DDS caps
class DDSCAPS(IntFlag):
    COMPLEX = 0x8
    TEXTURE = 0x1000
    MIPMAP = 0x400000


class DDSCAPS2(IntFlag):
    CUBEMAP = 0x200
    CUBEMAP_POSITIVEX = 0x400
    CUBEMAP_NEGATIVEX = 0x800
    CUBEMAP_POSITIVEY = 0x1000
    CUBEMAP_NEGATIVEY = 0x2000
    CUBEMAP_POSITIVEZ = 0x4000
    CUBEMAP_NEGATIVEZ = 0x8000
    VOLUME = 0x200000


# Pixel Format
class DDPF(IntFlag):
    ALPHAPIXELS = 0x1
    ALPHA = 0x2
    FOURCC = 0x4
    PALETTEINDEXED8 = 0x20
    RGB = 0x40
    LUMINANCE = 0x20000


# dxgiformat.h
class DXGI_FORMAT(IntEnum):
    UNKNOWN = 0
    R32G32B32A32_TYPELESS = 1
    R32G32B32A32_FLOAT = 2
    R32G32B32A32_UINT = 3
    R32G32B32A32_SINT = 4
    R32G32B32_TYPELESS = 5
    R32G32B32_FLOAT = 6
    R32G32B32_UINT = 7
    R32G32B32_SINT = 8
    R16G16B16A16_TYPELESS = 9
    R16G16B16A16_FLOAT = 10
    R16G16B16A16_UNORM = 11
    R16G16B16A16_UINT = 12
    R16G16B16A16_SNORM = 13
    R16G16B16A16_SINT = 14
    R32G32_TYPELESS = 15
    R32G32_FLOAT = 16
    R32G32_UINT = 17
    R32G32_SINT = 18
    R32G8X24_TYPELESS = 19
    D32_FLOAT_S8X24_UINT = 20
    R32_FLOAT_X8X24_TYPELESS = 21
    X32_TYPELESS_G8X24_UINT = 22
    R10G10B10A2_TYPELESS = 23
    R10G10B10A2_UNORM = 24
    R10G10B10A2_UINT = 25
    R11G11B10_FLOAT = 26
    R8G8B8A8_TYPELESS = 27
    R8G8B8A8_UNORM = 28
    R8G8B8A8_UNORM_SRGB = 29
    R8G8B8A8_UINT = 30
    R8G8B8A8_SNORM = 31
    R8G8B8A8_SINT = 32
    R16G16_TYPELESS = 33
    R16G16_FLOAT = 34
    R16G16_UNORM = 35
    R16G16_UINT = 36
    R16G16_SNORM = 37
    R16G16_SINT = 38
    R32_TYPELESS = 39
    D32_FLOAT = 40
    R32_FLOAT = 41
    R32_UINT = 42
    R32_SINT = 43
    R24G8_TYPELESS = 44
    D24_UNORM_S8_UINT = 45
    R24_UNORM_X8_TYPELESS = 46
    X24_TYPELESS_G8_UINT = 47
    R8G8_TYPELESS = 48
    R8G8_UNORM = 49
    R8G8_UINT = 50
    R8G8_SNORM = 51
    R8G8_SINT = 52
    R16_TYPELESS = 53
    R16_FLOAT = 54
    D16_UNORM = 55
    R16_UNORM = 56
    R16_UINT = 57
    R16_SNORM = 58
    R16_SINT = 59
    R8_TYPELESS = 60
    R8_UNORM = 61
    R8_UINT = 62
    R8_SNORM = 63
    R8_SINT = 64
    A8_UNORM = 65
    R1_UNORM = 66
    R9G9B9E5_SHAREDEXP = 67
    R8G8_B8G8_UNORM = 68
    G8R8_G8B8_UNORM = 69
    BC1_TYPELESS = 70
    BC1_UNORM = 71
    BC1_UNORM_SRGB = 72
    BC2_TYPELESS = 73
    BC2_UNORM = 74
    BC2_UNORM_SRGB = 75
    BC3_TYPELESS = 76
    BC3_UNORM = 77
    BC3_UNORM_SRGB = 78
    BC4_TYPELESS = 79
    BC4_UNORM = 80
    BC4_SNORM = 81
    BC5_TYPELESS = 82
    BC5_UNORM = 83
    BC5_SNORM = 84
    B5G6R5_UNORM = 85
    B5G5R5A1_UNORM = 86
    B8G8R8A8_UNORM = 87
    B8G8R8X8_UNORM = 88
    R10G10B10_XR_BIAS_A2_UNORM = 89
    B8G8R8A8_TYPELESS = 90
    B8G8R8A8_UNORM_SRGB = 91
    B8G8R8X8_TYPELESS = 92
    B8G8R8X8_UNORM_SRGB = 93
    BC6H_TYPELESS = 94
    BC6H_UF16 = 95
    BC6H_SF16 = 96
    BC7_TYPELESS = 97
    BC7_UNORM = 98
    BC7_UNORM_SRGB = 99
    AYUV = 100
    Y410 = 101
    Y416 = 102
    NV12 = 103
    P010 = 104
    P016 = 105
    OPAQUE_420 = 106
    YUY2 = 107
    Y210 = 108
    Y216 = 109
    NV11 = 110
    AI44 = 111
    IA44 = 112
    P8 = 113
    A8P8 = 114
    B4G4R4A4_UNORM = 115
    P208 = 130
    V208 = 131
    V408 = 132
    SAMPLER_FEEDBACK_MIN_MIP_OPAQUE = 189
    SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE = 190


class D3DFMT(IntEnum):
    UNKNOWN = 0
    R8G8B8 = 20
    A8R8G8B8 = 21
    X8R8G8B8 = 22
    R5G6B5 = 23
    X1R5G5B5 = 24
    A1R5G5B5 = 25
    A4R4G4B4 = 26
    R3G3B2 = 27
    A8 = 28
    A8R3G3B2 = 29
    X4R4G4B4 = 30
    A2B10G10R10 = 31
    A8B8G8R8 = 32
    X8B8G8R8 = 33
    G16R16 = 34
    A2R10G10B10 = 35
    A16B16G16R16 = 36
    A8P8 = 40
    P8 = 41
    L8 = 50
    A8L8 = 51
    A4L4 = 52
    V8U8 = 60
    L6V5U5 = 61
    X8L8V8U8 = 62
    Q8W8V8U8 = 63
    V16U16 = 64
    A2W10V10U10 = 67
    D16_LOCKABLE = 70
    D32 = 71
    D15S1 = 73
    D24S8 = 75
    D24X8 = 77
    D24X4S4 = 79
    D16 = 80
    D32F_LOCKABLE = 82
    D24FS8 = 83
    D32_LOCKABLE = 84
    S8_LOCKABLE = 85
    L16 = 81
    VERTEXDATA = 100
    INDEX16 = 101
    INDEX32 = 102
    Q16W16V16U16 = 110
    R16F = 111
    G16R16F = 112
    A16B16G16R16F = 113
    R32F = 114
    G32R32F = 115
    A32B32G32R32F = 116
    CxV8U8 = 117
    A1 = 118
    A2B10G10R10_XR_BIAS = 119
    BINARYBUFFER = 199

    UYVY = i32(b"UYVY")
    R8G8_B8G8 = i32(b"RGBG")
    YUY2 = i32(b"YUY2")
    G8R8_G8B8 = i32(b"GRGB")
    DXT1 = i32(b"DXT1")
    DXT2 = i32(b"DXT2")
    DXT3 = i32(b"DXT3")
    DXT4 = i32(b"DXT4")
    DXT5 = i32(b"DXT5")
    DX10 = i32(b"DX10")
    BC4S = i32(b"BC4S")
    BC4U = i32(b"BC4U")
    BC5S = i32(b"BC5S")
    BC5U = i32(b"BC5U")
    ATI1 = i32(b"ATI1")
    ATI2 = i32(b"ATI2")
    MULTI2_ARGB8 = i32(b"MET1")


# Backward compatibility layer
module = sys.modules[__name__]
for item in DDSD:
    assert item.name is not None
    setattr(module, f"DDSD_{item.name}", item.value)
for item1 in DDSCAPS:
    assert item1.name is not None
    setattr(module, f"DDSCAPS_{item1.name}", item1.value)
for item2 in DDSCAPS2:
    assert item2.name is not None
    setattr(module, f"DDSCAPS2_{item2.name}", item2.value)
for item3 in DDPF:
    assert item3.name is not None
    setattr(module, f"DDPF_{item3.name}", item3.value)

DDS_FOURCC = DDPF.FOURCC
DDS_RGB = DDPF.RGB
DDS_RGBA = DDPF.RGB | DDPF.ALPHAPIXELS
DDS_LUMINANCE = DDPF.LUMINANCE
DDS_LUMINANCEA = DDPF.LUMINANCE | DDPF.ALPHAPIXELS
DDS_ALPHA = DDPF.ALPHA
DDS_PAL8 = DDPF.PALETTEINDEXED8

DDS_HEADER_FLAGS_TEXTURE = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PIXELFORMAT
DDS_HEADER_FLAGS_MIPMAP = DDSD.MIPMAPCOUNT
DDS_HEADER_FLAGS_VOLUME = DDSD.DEPTH
DDS_HEADER_FLAGS_PITCH = DDSD.PITCH
DDS_HEADER_FLAGS_LINEARSIZE = DDSD.LINEARSIZE

DDS_HEIGHT = DDSD.HEIGHT
DDS_WIDTH = DDSD.WIDTH

DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS.TEXTURE
DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS.COMPLEX | DDSCAPS.MIPMAP
DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS.COMPLEX

DDS_CUBEMAP_POSITIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEX
DDS_CUBEMAP_NEGATIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEX
DDS_CUBEMAP_POSITIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEY
DDS_CUBEMAP_NEGATIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEY
DDS_CUBEMAP_POSITIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEZ
DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEZ

DXT1_FOURCC = D3DFMT.DXT1
DXT3_FOURCC = D3DFMT.DXT3
DXT5_FOURCC = D3DFMT.DXT5

DXGI_FORMAT_R8G8B8A8_TYPELESS = DXGI_FORMAT.R8G8B8A8_TYPELESS
DXGI_FORMAT_R8G8B8A8_UNORM = DXGI_FORMAT.R8G8B8A8_UNORM
DXGI_FORMAT_R8G8B8A8_UNORM_SRGB = DXGI_FORMAT.R8G8B8A8_UNORM_SRGB
DXGI_FORMAT_BC5_TYPELESS = DXGI_FORMAT.BC5_TYPELESS
DXGI_FORMAT_BC5_UNORM = DXGI_FORMAT.BC5_UNORM
DXGI_FORMAT_BC5_SNORM = DXGI_FORMAT.BC5_SNORM
DXGI_FORMAT_BC6H_UF16 = DXGI_FORMAT.BC6H_UF16
DXGI_FORMAT_BC6H_SF16 = DXGI_FORMAT.BC6H_SF16
DXGI_FORMAT_BC7_TYPELESS = DXGI_FORMAT.BC7_TYPELESS
DXGI_FORMAT_BC7_UNORM = DXGI_FORMAT.BC7_UNORM
DXGI_FORMAT_BC7_UNORM_SRGB = DXGI_FORMAT.BC7_UNORM_SRGB


class DdsImageFile(ImageFile.ImageFile):
    format = "DDS"
    format_description = "DirectDraw Surface"

    def _open(self) -> None:
        if not _accept(self.fp.read(4)):
            msg = "not a DDS file"
            raise SyntaxError(msg)
        (header_size,) = struct.unpack("<I", self.fp.read(4))
        if header_size != 124:
            msg = f"Unsupported header size {repr(header_size)}"
            raise OSError(msg)
        header_bytes = self.fp.read(header_size - 4)
        if len(header_bytes) != 120:
            msg = f"Incomplete header: {len(header_bytes)} bytes"
            raise OSError(msg)
        header = io.BytesIO(header_bytes)

        flags, height, width = struct.unpack("<3I", header.read(12))
        self._size = (width, height)
        extents = (0, 0) + self.size

        pitch, depth, mipmaps = struct.unpack("<3I", header.read(12))
        struct.unpack("<11I", header.read(44))  # reserved

        # pixel format
        pfsize, pfflags, fourcc, bitcount = struct.unpack("<4I", header.read(16))
        n = 0
        rawmode = None
        if pfflags & DDPF.RGB:
            # Texture contains uncompressed RGB data
            if pfflags & DDPF.ALPHAPIXELS:
                self._mode = "RGBA"
                mask_count = 4
            else:
                self._mode = "RGB"
                mask_count = 3

            masks = struct.unpack(f"<{mask_count}I", header.read(mask_count * 4))
            self.tile = [ImageFile._Tile("dds_rgb", extents, 0, (bitcount, masks))]
            return
        elif pfflags & DDPF.LUMINANCE:
            if bitcount == 8:
                self._mode = "L"
            elif bitcount == 16 and pfflags & DDPF.ALPHAPIXELS:
                self._mode = "LA"
            else:
                msg = f"Unsupported bitcount {bitcount} for {pfflags}"
                raise OSError(msg)
        elif pfflags & DDPF.PALETTEINDEXED8:
            self._mode = "P"
            self.palette = ImagePalette.raw("RGBA", self.fp.read(1024))
            self.palette.mode = "RGBA"
        elif pfflags & DDPF.FOURCC:
            offset = header_size + 4
            if fourcc == D3DFMT.DXT1:
                self._mode = "RGBA"
                self.pixel_format = "DXT1"
                n = 1
            elif fourcc == D3DFMT.DXT3:
                self._mode = "RGBA"
                self.pixel_format = "DXT3"
                n = 2
            elif fourcc == D3DFMT.DXT5:
                self._mode = "RGBA"
                self.pixel_format = "DXT5"
                n = 3
            elif fourcc in (D3DFMT.BC4U, D3DFMT.ATI1):
                self._mode = "L"
                self.pixel_format = "BC4"
                n = 4
            elif fourcc == D3DFMT.BC5S:
                self._mode = "RGB"
                self.pixel_format = "BC5S"
                n = 5
            elif fourcc in (D3DFMT.BC5U, D3DFMT.ATI2):
                self._mode = "RGB"
                self.pixel_format = "BC5"
                n = 5
            elif fourcc == D3DFMT.DX10:
                offset += 20
                # ignoring flags which pertain to volume textures and cubemaps
                (dxgi_format,) = struct.unpack("<I", self.fp.read(4))
                self.fp.read(16)
                if dxgi_format in (
                    DXGI_FORMAT.BC1_UNORM,
                    DXGI_FORMAT.BC1_TYPELESS,
                ):
                    self._mode = "RGBA"
                    self.pixel_format = "BC1"
                    n = 1
                elif dxgi_format in (DXGI_FORMAT.BC4_TYPELESS, DXGI_FORMAT.BC4_UNORM):
                    self._mode = "L"
                    self.pixel_format = "BC4"
                    n = 4
                elif dxgi_format in (DXGI_FORMAT.BC5_TYPELESS, DXGI_FORMAT.BC5_UNORM):
                    self._mode = "RGB"
                    self.pixel_format = "BC5"
                    n = 5
                elif dxgi_format == DXGI_FORMAT.BC5_SNORM:
                    self._mode = "RGB"
                    self.pixel_format = "BC5S"
                    n = 5
                elif dxgi_format == DXGI_FORMAT.BC6H_UF16:
                    self._mode = "RGB"
                    self.pixel_format = "BC6H"
                    n = 6
                elif dxgi_format == DXGI_FORMAT.BC6H_SF16:
                    self._mode = "RGB"
                    self.pixel_format = "BC6HS"
                    n = 6
                elif dxgi_format in (
                    DXGI_FORMAT.BC7_TYPELESS,
                    DXGI_FORMAT.BC7_UNORM,
                    DXGI_FORMAT.BC7_UNORM_SRGB,
                ):
                    self._mode = "RGBA"
                    self.pixel_format = "BC7"
                    n = 7
                    if dxgi_format == DXGI_FORMAT.BC7_UNORM_SRGB:
                        self.info["gamma"] = 1 / 2.2
                elif dxgi_format in (
                    DXGI_FORMAT.R8G8B8A8_TYPELESS,
                    DXGI_FORMAT.R8G8B8A8_UNORM,
                    DXGI_FORMAT.R8G8B8A8_UNORM_SRGB,
                ):
                    self._mode = "RGBA"
                    if dxgi_format == DXGI_FORMAT.R8G8B8A8_UNORM_SRGB:
                        self.info["gamma"] = 1 / 2.2
                else:
                    msg = f"Unimplemented DXGI format {dxgi_format}"
                    raise NotImplementedError(msg)
            else:
                msg = f"Unimplemented pixel format {repr(fourcc)}"
                raise NotImplementedError(msg)
        else:
            msg = f"Unknown pixel format flags {pfflags}"
            raise NotImplementedError(msg)

        if n:
            self.tile = [
                ImageFile._Tile("bcn", extents, offset, (n, self.pixel_format))
            ]
        else:
            self.tile = [ImageFile._Tile("raw", extents, 0, rawmode or self.mode)]

    def load_seek(self, pos: int) -> None:
        pass


class DdsRgbDecoder(ImageFile.PyDecoder):
    _pulls_fd = True

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        assert self.fd is not None
        bitcount, masks = self.args

        # Some masks will be padded with zeros, e.g. R 0b11 G 0b1100
        # Calculate how many zeros each mask is padded with
        mask_offsets = []
        # And the maximum value of each channel without the padding
        mask_totals = []
        for mask in masks:
            offset = 0
            if mask != 0:
                while mask >> (offset + 1) << (offset + 1) == mask:
                    offset += 1
            mask_offsets.append(offset)
            mask_totals.append(mask >> offset)

        data = bytearray()
        bytecount = bitcount // 8
        dest_length = self.state.xsize * self.state.ysize * len(masks)
        while len(data) < dest_length:
            value = int.from_bytes(self.fd.read(bytecount), "little")
            for i, mask in enumerate(masks):
                masked_value = value & mask
                # Remove the zero padding, and scale it to 8 bits
                data += o8(
                    int(((masked_value >> mask_offsets[i]) / mask_totals[i]) * 255)
                )
        self.set_as_raw(data)
        return -1, 0


def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    if im.mode not in ("RGB", "RGBA", "L", "LA"):
        msg = f"cannot write mode {im.mode} as DDS"
        raise OSError(msg)

    alpha = im.mode[-1] == "A"
    if im.mode[0] == "L":
        pixel_flags = DDPF.LUMINANCE
        rawmode = im.mode
        if alpha:
            rgba_mask = [0x000000FF, 0x000000FF, 0x000000FF]
        else:
            rgba_mask = [0xFF000000, 0xFF000000, 0xFF000000]
    else:
        pixel_flags = DDPF.RGB
        rawmode = im.mode[::-1]
        rgba_mask = [0x00FF0000, 0x0000FF00, 0x000000FF]

        if alpha:
            r, g, b, a = im.split()
            im = Image.merge("RGBA", (a, r, g, b))
    if alpha:
        pixel_flags |= DDPF.ALPHAPIXELS
    rgba_mask.append(0xFF000000 if alpha else 0)

    flags = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PITCH | DDSD.PIXELFORMAT
    bitcount = len(im.getbands()) * 8
    pitch = (im.width * bitcount + 7) // 8

    fp.write(
        o32(DDS_MAGIC)
        + struct.pack(
            "<7I",
            124,  # header size
            flags,  # flags
            im.height,
            im.width,
            pitch,
            0,  # depth
            0,  # mipmaps
        )
        + struct.pack("11I", *((0,) * 11))  # reserved
        # pfsize, pfflags, fourcc, bitcount
        + struct.pack("<4I", 32, pixel_flags, 0, bitcount)
        + struct.pack("<4I", *rgba_mask)  # dwRGBABitMask
        + struct.pack("<5I", DDSCAPS.TEXTURE, 0, 0, 0, 0)
    )
    ImageFile._save(
        im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]
    )


def _accept(prefix: bytes) -> bool:
    return prefix[:4] == b"DDS "


Image.register_open(DdsImageFile.format, DdsImageFile, _accept)
Image.register_decoder("dds_rgb", DdsRgbDecoder)
Image.register_save(DdsImageFile.format, _save)
Image.register_extension(DdsImageFile.format, ".dds")
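
# Usage sketch (illustrative; the file name is hypothetical). Compressed
# textures expose their codec via pixel_format, while uncompressed data is
# routed through the "dds_rgb" or "raw" decoders:
#
#     from PIL import Image
#
#     with Image.open("texture.dds") as im:
#         print(im.mode, im.size, getattr(im, "pixel_format", None))
#         im.save("texture_out.dds")  # writer emits uncompressed RGB(A)/L(A)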
@@ -1,474 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl   Created (0.1)
# 1996-05-18 fl   Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl   Don't choke on floating point BoundingBox values
# 1996-08-23 fl   Handle files from Macintosh (0.3)
# 2001-02-17 fl   Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl   Check gs.close status (from Federico Di Gregorio) (0.5)
# 2014-05-07 e    Handling of EPS with binary preview and fixed resolution
#                 resizing
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import io
import os
import re
import subprocess
import sys
import tempfile
from typing import IO

from . import Image, ImageFile
from ._binary import i32le as i32

# --------------------------------------------------------------------


split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")

gs_binary: str | bool | None = None
gs_windows_binary = None


def has_ghostscript() -> bool:
    global gs_binary, gs_windows_binary
    if gs_binary is None:
        if sys.platform.startswith("win"):
            if gs_windows_binary is None:
                import shutil

                for binary in ("gswin32c", "gswin64c", "gs"):
                    if shutil.which(binary) is not None:
                        gs_windows_binary = binary
                        break
                else:
                    gs_windows_binary = False
            gs_binary = gs_windows_binary
        else:
            try:
                subprocess.check_call(["gs", "--version"], stdout=subprocess.DEVNULL)
                gs_binary = "gs"
            except OSError:
                gs_binary = False
    return gs_binary is not False


def Ghostscript(
    tile: list[ImageFile._Tile],
    size: tuple[int, int],
    fp: IO[bytes],
    scale: int = 1,
    transparency: bool = False,
) -> Image.core.ImagingCore:
    """Render an image using Ghostscript"""
    global gs_binary
    if not has_ghostscript():
        msg = "Unable to locate Ghostscript on paths"
        raise OSError(msg)
    assert isinstance(gs_binary, str)

    # Unpack decoder tile
    args = tile[0].args
    assert isinstance(args, tuple)
    length, bbox = args

    # Hack to support hi-res rendering
    scale = int(scale) or 1
    width = size[0] * scale
    height = size[1] * scale
    # resolution is dependent on bbox and size
    res_x = 72.0 * width / (bbox[2] - bbox[0])
    res_y = 72.0 * height / (bbox[3] - bbox[1])

    out_fd, outfile = tempfile.mkstemp()
    os.close(out_fd)

    infile_temp = None
    if hasattr(fp, "name") and os.path.exists(fp.name):
        infile = fp.name
    else:
        in_fd, infile_temp = tempfile.mkstemp()
        os.close(in_fd)
        infile = infile_temp

        # Ignore length and offset!
        # Ghostscript can read it
        # Copy whole file to read in Ghostscript
        with open(infile_temp, "wb") as f:
            # fetch length of fp
            fp.seek(0, io.SEEK_END)
            fsize = fp.tell()
            # ensure start position
            # go back
            fp.seek(0)
            lengthfile = fsize
            while lengthfile > 0:
                s = fp.read(min(lengthfile, 100 * 1024))
                if not s:
                    break
                lengthfile -= len(s)
                f.write(s)

    if transparency:
        # "RGBA"
        device = "pngalpha"
    else:
        # "pnmraw" automatically chooses between
        # PBM ("1"), PGM ("L"), and PPM ("RGB").
        device = "pnmraw"

    # Build Ghostscript command
    command = [
        gs_binary,
        "-q",  # quiet mode
        f"-g{width:d}x{height:d}",  # set output geometry (pixels)
        f"-r{res_x:f}x{res_y:f}",  # set input DPI (dots per inch)
        "-dBATCH",  # exit after processing
        "-dNOPAUSE",  # don't pause between pages
        "-dSAFER",  # safe mode
        f"-sDEVICE={device}",
        f"-sOutputFile={outfile}",  # output file
        # adjust for image origin
        "-c",
        f"{-bbox[0]} {-bbox[1]} translate",
        "-f",
        infile,  # input file
        # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272)
        "-c",
        "showpage",
    ]

    # push data through Ghostscript
    try:
        startupinfo = None
        if sys.platform.startswith("win"):
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        subprocess.check_call(command, startupinfo=startupinfo)
        with Image.open(outfile) as out_im:
            out_im.load()
            return out_im.im.copy()
    finally:
        try:
            os.unlink(outfile)
            if infile_temp:
                os.unlink(infile_temp)
        except OSError:
            pass


def _accept(prefix: bytes) -> bool:
    return prefix[:4] == b"%!PS" or (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5)


##
# Image plugin for Encapsulated PostScript. This plugin supports only
# a few variants of this format.


class EpsImageFile(ImageFile.ImageFile):
    """EPS File Parser for the Python Imaging Library"""

    format = "EPS"
    format_description = "Encapsulated Postscript"

    mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"}

    def _open(self) -> None:
        (length, offset) = self._find_offset(self.fp)

        # go to offset - start of "%!PS"
        self.fp.seek(offset)

        self._mode = "RGB"

        # When reading header comments, the first comment is used.
        # When reading trailer comments, the last comment is used.
        bounding_box: list[int] | None = None
        imagedata_size: tuple[int, int] | None = None

        byte_arr = bytearray(255)
        bytes_mv = memoryview(byte_arr)
        bytes_read = 0
        reading_header_comments = True
        reading_trailer_comments = False
        trailer_reached = False

        def check_required_header_comments() -> None:
            """
            The EPS specification requires that some headers exist.
            This should be checked when the header comments formally end,
            when image data starts, or when the file ends, whichever comes first.
            """
            if "PS-Adobe" not in self.info:
                msg = 'EPS header missing "%!PS-Adobe" comment'
                raise SyntaxError(msg)
            if "BoundingBox" not in self.info:
                msg = 'EPS header missing "%%BoundingBox" comment'
                raise SyntaxError(msg)

        def read_comment(s: str) -> bool:
            nonlocal bounding_box, reading_trailer_comments
            try:
                m = split.match(s)
            except re.error as e:
                msg = "not an EPS file"
                raise SyntaxError(msg) from e

            if not m:
                return False

            k, v = m.group(1, 2)
            self.info[k] = v
            if k == "BoundingBox":
                if v == "(atend)":
                    reading_trailer_comments = True
                elif not bounding_box or (trailer_reached and reading_trailer_comments):
                    try:
                        # Note: The DSC spec says that BoundingBox
                        # fields should be integers, but some drivers
                        # put floating point values there anyway.
                        bounding_box = [int(float(i)) for i in v.split()]
                    except Exception:
                        pass
            return True

        while True:
            byte = self.fp.read(1)
            if byte == b"":
                # if we didn't read a byte we must be at the end of the file
                if bytes_read == 0:
                    if reading_header_comments:
                        check_required_header_comments()
                    break
            elif byte in b"\r\n":
                # if we read a line ending character, ignore it and parse what
                # we have already read. if we haven't read any other characters,
                # continue reading
                if bytes_read == 0:
                    continue
            else:
                # ASCII/hexadecimal lines in an EPS file must not exceed
                # 255 characters, not including line ending characters
                if bytes_read >= 255:
                    # only enforce this for lines starting with a "%",
                    # otherwise assume it's binary data
                    if byte_arr[0] == ord("%"):
                        msg = "not an EPS file"
                        raise SyntaxError(msg)
                    else:
                        if reading_header_comments:
                            check_required_header_comments()
                            reading_header_comments = False
                        # reset bytes_read so we can keep reading
                        # data until the end of the line
                        bytes_read = 0
                byte_arr[bytes_read] = byte[0]
                bytes_read += 1
                continue

            if reading_header_comments:
                # Load EPS header

                # if this line doesn't start with a "%",
                # or does start with "%%EndComments",
                # then we've reached the end of the header/comments
                if byte_arr[0] != ord("%") or bytes_mv[:13] == b"%%EndComments":
                    check_required_header_comments()
                    reading_header_comments = False
                    continue

                s = str(bytes_mv[:bytes_read], "latin-1")
                if not read_comment(s):
                    m = field.match(s)
                    if m:
                        k = m.group(1)
                        if k[:8] == "PS-Adobe":
                            self.info["PS-Adobe"] = k[9:]
                        else:
                            self.info[k] = ""
                    elif s[0] == "%":
                        # handle non-DSC PostScript comments that some
                        # tools mistakenly put in the Comments section
                        pass
                    else:
                        msg = "bad EPS header"
                        raise OSError(msg)
            elif bytes_mv[:11] == b"%ImageData:":
                # Check for an "ImageData" descriptor
                # https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577413_pgfId-1035096

                # If we've already read an "ImageData" descriptor,
                # don't read another one.
                if imagedata_size:
                    bytes_read = 0
                    continue

                # Values:
                # columns
                # rows
                # bit depth (1 or 8)
                # mode (1: L, 2: LAB, 3: RGB, 4: CMYK)
                # number of padding channels
                # block size (number of bytes per row per channel)
                # binary/ascii (1: binary, 2: ascii)
                # data start identifier (the image data follows after a single line
                #   consisting only of this quoted value)
                image_data_values = byte_arr[11:bytes_read].split(None, 7)
                columns, rows, bit_depth, mode_id = (
                    int(value) for value in image_data_values[:4]
                )

                if bit_depth == 1:
                    self._mode = "1"
                elif bit_depth == 8:
                    try:
                        self._mode = self.mode_map[mode_id]
                    except ValueError:
                        break
                else:
                    break

                # Parse the columns and rows after checking the bit depth and mode
                # in case the bit depth and/or mode are invalid.
                imagedata_size = columns, rows
            elif bytes_mv[:5] == b"%%EOF":
                break
            elif trailer_reached and reading_trailer_comments:
                # Load EPS trailer
                s = str(bytes_mv[:bytes_read], "latin-1")
                read_comment(s)
            elif bytes_mv[:9] == b"%%Trailer":
                trailer_reached = True
            bytes_read = 0

        # A "BoundingBox" is always required,
        # even if an "ImageData" descriptor size exists.
        if not bounding_box:
            msg = "cannot determine EPS bounding box"
            raise OSError(msg)

        # An "ImageData" size takes precedence over the "BoundingBox".
        self._size = imagedata_size or (
            bounding_box[2] - bounding_box[0],
            bounding_box[3] - bounding_box[1],
        )

        self.tile = [
            ImageFile._Tile("eps", (0, 0) + self.size, offset, (length, bounding_box))
        ]

    def _find_offset(self, fp: IO[bytes]) -> tuple[int, int]:
        s = fp.read(4)

        if s == b"%!PS":
            # for HEAD without binary preview
            fp.seek(0, io.SEEK_END)
            length = fp.tell()
            offset = 0
        elif i32(s) == 0xC6D3D0C5:
            # FIX for: Some EPS file not handled correctly / issue #302
            # EPS can contain binary data
            # or start directly with latin coding
            # more info see:
            # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
            s = fp.read(8)
            offset = i32(s)
            length = i32(s, 4)
        else:
            msg = "not an EPS file"
            raise SyntaxError(msg)

        return length, offset

    def load(
        self, scale: int = 1, transparency: bool = False
    ) -> Image.core.PixelAccess | None:
        # Load EPS via Ghostscript
        if self.tile:
            self.im = Ghostscript(self.tile, self.size, self.fp, scale, transparency)
            self._mode = self.im.mode
            self._size = self.im.size
            self.tile = []
        return Image.Image.load(self)

    def load_seek(self, pos: int) -> None:
        # we can't incrementally load, so force ImageFile.parser to
        # use our custom load method by defining this method.
        pass


# --------------------------------------------------------------------


def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes, eps: int = 1) -> None:
    """EPS Writer for the Python Imaging Library."""

    # make sure image data is available
    im.load()

    # determine PostScript image mode
    if im.mode == "L":
        operator = (8, 1, b"image")
    elif im.mode == "RGB":
        operator = (8, 3, b"false 3 colorimage")
    elif im.mode == "CMYK":
        operator = (8, 4, b"false 4 colorimage")
    else:
        msg = "image mode is not supported"
        raise ValueError(msg)

    if eps:
        # write EPS header
        fp.write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
        fp.write(b"%%Creator: PIL 0.1 EpsEncode\n")
        # fp.write("%%CreationDate: %s"...)
        fp.write(b"%%%%BoundingBox: 0 0 %d %d\n" % im.size)
        fp.write(b"%%Pages: 1\n")
        fp.write(b"%%EndComments\n")
        fp.write(b"%%Page: 1 1\n")
        fp.write(b"%%ImageData: %d %d " % im.size)
        fp.write(b'%d %d 0 1 1 "%s"\n' % operator)

    # image header
    fp.write(b"gsave\n")
    fp.write(b"10 dict begin\n")
    fp.write(b"/buf %d string def\n" % (im.size[0] * operator[1]))
    fp.write(b"%d %d scale\n" % im.size)
    fp.write(b"%d %d 8\n" % im.size)  # <= bits
    fp.write(b"[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
    fp.write(b"{ currentfile buf readhexstring pop } bind\n")
    fp.write(operator[2] + b"\n")
    if hasattr(fp, "flush"):
        fp.flush()

    ImageFile._save(im, fp, [ImageFile._Tile("eps", (0, 0) + im.size, 0, None)])

    fp.write(b"\n%%%%EndBinary\n")
    fp.write(b"grestore end\n")
    if hasattr(fp, "flush"):
        fp.flush()


# --------------------------------------------------------------------


Image.register_open(EpsImageFile.format, EpsImageFile, _accept)

Image.register_save(EpsImageFile.format, _save)

Image.register_extensions(EpsImageFile.format, [".ps", ".eps"])

Image.register_mime(EpsImageFile.format, "application/postscript")
|
||||
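A quick way to exercise the writer above is to round-trip an in-memory image through `Image.save`. This is a minimal sketch assuming only that Pillow is installed; writing EPS needs no Ghostscript (only reading one back does).

```python
from io import BytesIO

from PIL import Image

# Saving dispatches to _save() through the registry calls above.
im = Image.new("RGB", (32, 16), "red")
buf = BytesIO()
im.save(buf, format="EPS")
assert buf.getvalue().startswith(b"%!PS-Adobe-3.0 EPSF-3.0")
```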
@@ -1,381 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# EXIF tags
#
# Copyright (c) 2003 by Secret Labs AB
#
# See the README file for information on usage and redistribution.
#

"""
This module provides constants and clear-text names for various
well-known EXIF tags.
"""
from __future__ import annotations

from enum import IntEnum


class Base(IntEnum):
    # possibly incomplete
    InteropIndex = 0x0001
    ProcessingSoftware = 0x000B
    NewSubfileType = 0x00FE
    SubfileType = 0x00FF
    ImageWidth = 0x0100
    ImageLength = 0x0101
    BitsPerSample = 0x0102
    Compression = 0x0103
    PhotometricInterpretation = 0x0106
    Thresholding = 0x0107
    CellWidth = 0x0108
    CellLength = 0x0109
    FillOrder = 0x010A
    DocumentName = 0x010D
    ImageDescription = 0x010E
    Make = 0x010F
    Model = 0x0110
    StripOffsets = 0x0111
    Orientation = 0x0112
    SamplesPerPixel = 0x0115
    RowsPerStrip = 0x0116
    StripByteCounts = 0x0117
    MinSampleValue = 0x0118
    MaxSampleValue = 0x0119
    XResolution = 0x011A
    YResolution = 0x011B
    PlanarConfiguration = 0x011C
    PageName = 0x011D
    FreeOffsets = 0x0120
    FreeByteCounts = 0x0121
    GrayResponseUnit = 0x0122
    GrayResponseCurve = 0x0123
    T4Options = 0x0124
    T6Options = 0x0125
    ResolutionUnit = 0x0128
    PageNumber = 0x0129
    TransferFunction = 0x012D
    Software = 0x0131
    DateTime = 0x0132
    Artist = 0x013B
    HostComputer = 0x013C
    Predictor = 0x013D
    WhitePoint = 0x013E
    PrimaryChromaticities = 0x013F
    ColorMap = 0x0140
    HalftoneHints = 0x0141
    TileWidth = 0x0142
    TileLength = 0x0143
    TileOffsets = 0x0144
    TileByteCounts = 0x0145
    SubIFDs = 0x014A
    InkSet = 0x014C
    InkNames = 0x014D
    NumberOfInks = 0x014E
    DotRange = 0x0150
    TargetPrinter = 0x0151
    ExtraSamples = 0x0152
    SampleFormat = 0x0153
    SMinSampleValue = 0x0154
    SMaxSampleValue = 0x0155
    TransferRange = 0x0156
    ClipPath = 0x0157
    XClipPathUnits = 0x0158
    YClipPathUnits = 0x0159
    Indexed = 0x015A
    JPEGTables = 0x015B
    OPIProxy = 0x015F
    JPEGProc = 0x0200
    JpegIFOffset = 0x0201
    JpegIFByteCount = 0x0202
    JpegRestartInterval = 0x0203
    JpegLosslessPredictors = 0x0205
    JpegPointTransforms = 0x0206
    JpegQTables = 0x0207
    JpegDCTables = 0x0208
    JpegACTables = 0x0209
    YCbCrCoefficients = 0x0211
    YCbCrSubSampling = 0x0212
    YCbCrPositioning = 0x0213
    ReferenceBlackWhite = 0x0214
    XMLPacket = 0x02BC
    RelatedImageFileFormat = 0x1000
    RelatedImageWidth = 0x1001
    RelatedImageLength = 0x1002
    Rating = 0x4746
    RatingPercent = 0x4749
    ImageID = 0x800D
    CFARepeatPatternDim = 0x828D
    BatteryLevel = 0x828F
    Copyright = 0x8298
    ExposureTime = 0x829A
    FNumber = 0x829D
    IPTCNAA = 0x83BB
    ImageResources = 0x8649
    ExifOffset = 0x8769
    InterColorProfile = 0x8773
    ExposureProgram = 0x8822
    SpectralSensitivity = 0x8824
    GPSInfo = 0x8825
    ISOSpeedRatings = 0x8827
    OECF = 0x8828
    Interlace = 0x8829
    TimeZoneOffset = 0x882A
    SelfTimerMode = 0x882B
    SensitivityType = 0x8830
    StandardOutputSensitivity = 0x8831
    RecommendedExposureIndex = 0x8832
    ISOSpeed = 0x8833
    ISOSpeedLatitudeyyy = 0x8834
    ISOSpeedLatitudezzz = 0x8835
    ExifVersion = 0x9000
    DateTimeOriginal = 0x9003
    DateTimeDigitized = 0x9004
    OffsetTime = 0x9010
    OffsetTimeOriginal = 0x9011
    OffsetTimeDigitized = 0x9012
    ComponentsConfiguration = 0x9101
    CompressedBitsPerPixel = 0x9102
    ShutterSpeedValue = 0x9201
    ApertureValue = 0x9202
    BrightnessValue = 0x9203
    ExposureBiasValue = 0x9204
    MaxApertureValue = 0x9205
    SubjectDistance = 0x9206
    MeteringMode = 0x9207
    LightSource = 0x9208
    Flash = 0x9209
    FocalLength = 0x920A
    Noise = 0x920D
    ImageNumber = 0x9211
    SecurityClassification = 0x9212
    ImageHistory = 0x9213
    TIFFEPStandardID = 0x9216
    MakerNote = 0x927C
    UserComment = 0x9286
    SubsecTime = 0x9290
    SubsecTimeOriginal = 0x9291
    SubsecTimeDigitized = 0x9292
    AmbientTemperature = 0x9400
    Humidity = 0x9401
    Pressure = 0x9402
    WaterDepth = 0x9403
    Acceleration = 0x9404
    CameraElevationAngle = 0x9405
    XPTitle = 0x9C9B
    XPComment = 0x9C9C
    XPAuthor = 0x9C9D
    XPKeywords = 0x9C9E
    XPSubject = 0x9C9F
    FlashPixVersion = 0xA000
    ColorSpace = 0xA001
    ExifImageWidth = 0xA002
    ExifImageHeight = 0xA003
    RelatedSoundFile = 0xA004
    ExifInteroperabilityOffset = 0xA005
    FlashEnergy = 0xA20B
    SpatialFrequencyResponse = 0xA20C
    FocalPlaneXResolution = 0xA20E
    FocalPlaneYResolution = 0xA20F
    FocalPlaneResolutionUnit = 0xA210
    SubjectLocation = 0xA214
    ExposureIndex = 0xA215
    SensingMethod = 0xA217
    FileSource = 0xA300
    SceneType = 0xA301
    CFAPattern = 0xA302
    CustomRendered = 0xA401
    ExposureMode = 0xA402
    WhiteBalance = 0xA403
    DigitalZoomRatio = 0xA404
    FocalLengthIn35mmFilm = 0xA405
    SceneCaptureType = 0xA406
    GainControl = 0xA407
    Contrast = 0xA408
    Saturation = 0xA409
    Sharpness = 0xA40A
    DeviceSettingDescription = 0xA40B
    SubjectDistanceRange = 0xA40C
    ImageUniqueID = 0xA420
    CameraOwnerName = 0xA430
    BodySerialNumber = 0xA431
    LensSpecification = 0xA432
    LensMake = 0xA433
    LensModel = 0xA434
    LensSerialNumber = 0xA435
    CompositeImage = 0xA460
    CompositeImageCount = 0xA461
    CompositeImageExposureTimes = 0xA462
    Gamma = 0xA500
    PrintImageMatching = 0xC4A5
    DNGVersion = 0xC612
    DNGBackwardVersion = 0xC613
    UniqueCameraModel = 0xC614
    LocalizedCameraModel = 0xC615
    CFAPlaneColor = 0xC616
    CFALayout = 0xC617
    LinearizationTable = 0xC618
    BlackLevelRepeatDim = 0xC619
    BlackLevel = 0xC61A
    BlackLevelDeltaH = 0xC61B
    BlackLevelDeltaV = 0xC61C
    WhiteLevel = 0xC61D
    DefaultScale = 0xC61E
    DefaultCropOrigin = 0xC61F
    DefaultCropSize = 0xC620
    ColorMatrix1 = 0xC621
    ColorMatrix2 = 0xC622
    CameraCalibration1 = 0xC623
    CameraCalibration2 = 0xC624
    ReductionMatrix1 = 0xC625
    ReductionMatrix2 = 0xC626
    AnalogBalance = 0xC627
    AsShotNeutral = 0xC628
    AsShotWhiteXY = 0xC629
    BaselineExposure = 0xC62A
    BaselineNoise = 0xC62B
    BaselineSharpness = 0xC62C
    BayerGreenSplit = 0xC62D
    LinearResponseLimit = 0xC62E
    CameraSerialNumber = 0xC62F
    LensInfo = 0xC630
    ChromaBlurRadius = 0xC631
    AntiAliasStrength = 0xC632
    ShadowScale = 0xC633
    DNGPrivateData = 0xC634
    MakerNoteSafety = 0xC635
    CalibrationIlluminant1 = 0xC65A
    CalibrationIlluminant2 = 0xC65B
    BestQualityScale = 0xC65C
    RawDataUniqueID = 0xC65D
    OriginalRawFileName = 0xC68B
    OriginalRawFileData = 0xC68C
    ActiveArea = 0xC68D
    MaskedAreas = 0xC68E
    AsShotICCProfile = 0xC68F
    AsShotPreProfileMatrix = 0xC690
    CurrentICCProfile = 0xC691
    CurrentPreProfileMatrix = 0xC692
    ColorimetricReference = 0xC6BF
    CameraCalibrationSignature = 0xC6F3
    ProfileCalibrationSignature = 0xC6F4
    AsShotProfileName = 0xC6F6
    NoiseReductionApplied = 0xC6F7
    ProfileName = 0xC6F8
    ProfileHueSatMapDims = 0xC6F9
    ProfileHueSatMapData1 = 0xC6FA
    ProfileHueSatMapData2 = 0xC6FB
    ProfileToneCurve = 0xC6FC
    ProfileEmbedPolicy = 0xC6FD
    ProfileCopyright = 0xC6FE
    ForwardMatrix1 = 0xC714
    ForwardMatrix2 = 0xC715
    PreviewApplicationName = 0xC716
    PreviewApplicationVersion = 0xC717
    PreviewSettingsName = 0xC718
    PreviewSettingsDigest = 0xC719
    PreviewColorSpace = 0xC71A
    PreviewDateTime = 0xC71B
    RawImageDigest = 0xC71C
    OriginalRawFileDigest = 0xC71D
    SubTileBlockSize = 0xC71E
    RowInterleaveFactor = 0xC71F
    ProfileLookTableDims = 0xC725
    ProfileLookTableData = 0xC726
    OpcodeList1 = 0xC740
    OpcodeList2 = 0xC741
    OpcodeList3 = 0xC74E
    NoiseProfile = 0xC761


"""Maps EXIF tags to tag names."""
TAGS = {
    **{i.value: i.name for i in Base},
    0x920C: "SpatialFrequencyResponse",
    0x9214: "SubjectLocation",
    0x9215: "ExposureIndex",
    0x828E: "CFAPattern",
    0x920B: "FlashEnergy",
    0x9216: "TIFF/EPStandardID",
}


class GPS(IntEnum):
    GPSVersionID = 0
    GPSLatitudeRef = 1
    GPSLatitude = 2
    GPSLongitudeRef = 3
    GPSLongitude = 4
    GPSAltitudeRef = 5
    GPSAltitude = 6
    GPSTimeStamp = 7
    GPSSatellites = 8
    GPSStatus = 9
    GPSMeasureMode = 10
    GPSDOP = 11
    GPSSpeedRef = 12
    GPSSpeed = 13
    GPSTrackRef = 14
    GPSTrack = 15
    GPSImgDirectionRef = 16
    GPSImgDirection = 17
    GPSMapDatum = 18
    GPSDestLatitudeRef = 19
    GPSDestLatitude = 20
    GPSDestLongitudeRef = 21
    GPSDestLongitude = 22
    GPSDestBearingRef = 23
    GPSDestBearing = 24
    GPSDestDistanceRef = 25
    GPSDestDistance = 26
    GPSProcessingMethod = 27
    GPSAreaInformation = 28
    GPSDateStamp = 29
    GPSDifferential = 30
    GPSHPositioningError = 31


"""Maps EXIF GPS tags to tag names."""
GPSTAGS = {i.value: i.name for i in GPS}


class Interop(IntEnum):
    InteropIndex = 1
    InteropVersion = 2
    RelatedImageFileFormat = 4096
    RelatedImageWidth = 4097
    RelatedImageHeight = 4098


class IFD(IntEnum):
    Exif = 34665
    GPSInfo = 34853
    Makernote = 37500
    Interop = 40965
    IFD1 = -1


class LightSource(IntEnum):
    Unknown = 0
    Daylight = 1
    Fluorescent = 2
    Tungsten = 3
    Flash = 4
    Fine = 9
    Cloudy = 10
    Shade = 11
    DaylightFluorescent = 12
    DayWhiteFluorescent = 13
    CoolWhiteFluorescent = 14
    WhiteFluorescent = 15
    StandardLightA = 17
    StandardLightB = 18
    StandardLightC = 19
    D55 = 20
    D65 = 21
    D75 = 22
    D50 = 23
    ISO = 24
    Other = 255
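The `TAGS` and `GPSTAGS` dictionaries exist so numeric tag IDs can be turned into readable names. A minimal sketch; `photo.jpg` is a hypothetical file standing in for any JPEG that carries EXIF data.

```python
from PIL import ExifTags, Image

with Image.open("photo.jpg") as im:  # hypothetical input file
    exif = im.getexif()

# Translate numeric tag IDs into names via the TAGS mapping...
named = {ExifTags.TAGS.get(tag_id, tag_id): value for tag_id, value in exif.items()}
print(named.get("Model"), named.get("DateTime"))

# ...or index with the enum members directly.
print(exif.get(ExifTags.Base.Orientation))
```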
@@ -1,152 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# FITS file handling
#
# Copyright (c) 1998-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import gzip
import math

from . import Image, ImageFile


def _accept(prefix: bytes) -> bool:
    return prefix[:6] == b"SIMPLE"


class FitsImageFile(ImageFile.ImageFile):
    format = "FITS"
    format_description = "FITS"

    def _open(self) -> None:
        assert self.fp is not None

        headers: dict[bytes, bytes] = {}
        header_in_progress = False
        decoder_name = ""
        while True:
            header = self.fp.read(80)
            if not header:
                msg = "Truncated FITS file"
                raise OSError(msg)
            keyword = header[:8].strip()
            if keyword in (b"SIMPLE", b"XTENSION"):
                header_in_progress = True
            elif headers and not header_in_progress:
                # This is now a data unit
                break
            elif keyword == b"END":
                # Seek to the end of the header unit
                self.fp.seek(math.ceil(self.fp.tell() / 2880) * 2880)
                if not decoder_name:
                    decoder_name, offset, args = self._parse_headers(headers)

                header_in_progress = False
                continue

            if decoder_name:
                # Keep going to read past the headers
                continue

            value = header[8:].split(b"/")[0].strip()
            if value.startswith(b"="):
                value = value[1:].strip()
            if not headers and (not _accept(keyword) or value != b"T"):
                msg = "Not a FITS file"
                raise SyntaxError(msg)
            headers[keyword] = value

        if not decoder_name:
            msg = "No image data"
            raise ValueError(msg)

        offset += self.fp.tell() - 80
        self.tile = [ImageFile._Tile(decoder_name, (0, 0) + self.size, offset, args)]

    def _get_size(
        self, headers: dict[bytes, bytes], prefix: bytes
    ) -> tuple[int, int] | None:
        naxis = int(headers[prefix + b"NAXIS"])
        if naxis == 0:
            return None

        if naxis == 1:
            return 1, int(headers[prefix + b"NAXIS1"])
        else:
            return int(headers[prefix + b"NAXIS1"]), int(headers[prefix + b"NAXIS2"])

    def _parse_headers(
        self, headers: dict[bytes, bytes]
    ) -> tuple[str, int, tuple[str | int, ...]]:
        prefix = b""
        decoder_name = "raw"
        offset = 0
        if (
            headers.get(b"XTENSION") == b"'BINTABLE'"
            and headers.get(b"ZIMAGE") == b"T"
            and headers[b"ZCMPTYPE"] == b"'GZIP_1 '"
        ):
            no_prefix_size = self._get_size(headers, prefix) or (0, 0)
            number_of_bits = int(headers[b"BITPIX"])
            offset = no_prefix_size[0] * no_prefix_size[1] * (number_of_bits // 8)

            prefix = b"Z"
            decoder_name = "fits_gzip"

        size = self._get_size(headers, prefix)
        if not size:
            return "", 0, ()

        self._size = size

        number_of_bits = int(headers[prefix + b"BITPIX"])
        if number_of_bits == 8:
            self._mode = "L"
        elif number_of_bits == 16:
            self._mode = "I;16"
        elif number_of_bits == 32:
            self._mode = "I"
        elif number_of_bits in (-32, -64):
            self._mode = "F"

        args: tuple[str | int, ...]
        if decoder_name == "raw":
            args = (self.mode, 0, -1)
        else:
            args = (number_of_bits,)
        return decoder_name, offset, args


class FitsGzipDecoder(ImageFile.PyDecoder):
    _pulls_fd = True

    def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
        assert self.fd is not None
        value = gzip.decompress(self.fd.read())

        rows = []
        offset = 0
        number_of_bits = min(self.args[0] // 8, 4)
        for y in range(self.state.ysize):
            row = bytearray()
            for x in range(self.state.xsize):
                row += value[offset + (4 - number_of_bits) : offset + 4]
                offset += 4
            rows.append(row)
        self.set_as_raw(bytes([pixel for row in rows[::-1] for pixel in row]))
        return -1, 0


# --------------------------------------------------------------------
# Registry

Image.register_open(FitsImageFile.format, FitsImageFile, _accept)
Image.register_decoder("fits_gzip", FitsGzipDecoder)

Image.register_extensions(FitsImageFile.format, [".fit", ".fits"])
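Because `_open` only needs 80-byte header cards padded to the 2880-byte FITS block size, the parser can be demonstrated without a real telescope file. A minimal sketch of a one-pixel, 8-bit image built in memory:

```python
from io import BytesIO

from PIL import Image

cards = [
    b"SIMPLE  =                    T",
    b"BITPIX  =                    8",
    b"NAXIS   =                    1",
    b"NAXIS1  =                    1",
    b"END",
]
# Header cards are 80 bytes each; header and data units pad to 2880 bytes.
header = b"".join(card.ljust(80) for card in cards).ljust(2880)
im = Image.open(BytesIO(header + b"\x00".ljust(2880)))
print(im.mode, im.size)  # -> L (1, 1)
```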
@@ -1,175 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# FLI/FLC file handling.
#
# History:
#       95-09-01 fl     Created
#       97-01-03 fl     Fixed parser, setup decoder tile
#       98-07-15 fl     Renamed offset attribute to avoid name clash
#
# Copyright (c) Secret Labs AB 1997-98.
# Copyright (c) Fredrik Lundh 1995-97.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import os

from . import Image, ImageFile, ImagePalette
from ._binary import i16le as i16
from ._binary import i32le as i32
from ._binary import o8

#
# decoder


def _accept(prefix: bytes) -> bool:
    return (
        len(prefix) >= 6
        and i16(prefix, 4) in [0xAF11, 0xAF12]
        and i16(prefix, 14) in [0, 3]  # flags
    )


##
# Image plugin for the FLI/FLC animation format.  Use the <b>seek</b>
# method to load individual frames.


class FliImageFile(ImageFile.ImageFile):
    format = "FLI"
    format_description = "Autodesk FLI/FLC Animation"
    _close_exclusive_fp_after_loading = False

    def _open(self) -> None:
        # HEAD
        s = self.fp.read(128)
        if not (_accept(s) and s[20:22] == b"\x00\x00"):
            msg = "not an FLI/FLC file"
            raise SyntaxError(msg)

        # frames
        self.n_frames = i16(s, 6)
        self.is_animated = self.n_frames > 1

        # image characteristics
        self._mode = "P"
        self._size = i16(s, 8), i16(s, 10)

        # animation speed
        duration = i32(s, 16)
        magic = i16(s, 4)
        if magic == 0xAF11:
            duration = (duration * 1000) // 70
        self.info["duration"] = duration

        # look for palette
        palette = [(a, a, a) for a in range(256)]

        s = self.fp.read(16)

        self.__offset = 128

        if i16(s, 4) == 0xF100:
            # prefix chunk; ignore it
            self.__offset = self.__offset + i32(s)
            self.fp.seek(self.__offset)
            s = self.fp.read(16)

        if i16(s, 4) == 0xF1FA:
            # look for palette chunk
            number_of_subchunks = i16(s, 6)
            chunk_size: int | None = None
            for _ in range(number_of_subchunks):
                if chunk_size is not None:
                    self.fp.seek(chunk_size - 6, os.SEEK_CUR)
                s = self.fp.read(6)
                chunk_type = i16(s, 4)
                if chunk_type in (4, 11):
                    self._palette(palette, 2 if chunk_type == 11 else 0)
                    break
                chunk_size = i32(s)
                if not chunk_size:
                    break

        self.palette = ImagePalette.raw(
            "RGB", b"".join(o8(r) + o8(g) + o8(b) for (r, g, b) in palette)
        )

        # set things up to decode first frame
        self.__frame = -1
        self._fp = self.fp
        self.__rewind = self.fp.tell()
        self.seek(0)

    def _palette(self, palette: list[tuple[int, int, int]], shift: int) -> None:
        # load palette

        i = 0
        for e in range(i16(self.fp.read(2))):
            s = self.fp.read(2)
            i = i + s[0]
            n = s[1]
            if n == 0:
                n = 256
            s = self.fp.read(n * 3)
            for n in range(0, len(s), 3):
                r = s[n] << shift
                g = s[n + 1] << shift
                b = s[n + 2] << shift
                palette[i] = (r, g, b)
                i += 1

    def seek(self, frame: int) -> None:
        if not self._seek_check(frame):
            return
        if frame < self.__frame:
            self._seek(0)

        for f in range(self.__frame + 1, frame + 1):
            self._seek(f)

    def _seek(self, frame: int) -> None:
        if frame == 0:
            self.__frame = -1
            self._fp.seek(self.__rewind)
            self.__offset = 128
        else:
            # ensure that the previous frame was loaded
            self.load()

        if frame != self.__frame + 1:
            msg = f"cannot seek to frame {frame}"
            raise ValueError(msg)
        self.__frame = frame

        # move to next frame
        self.fp = self._fp
        self.fp.seek(self.__offset)

        s = self.fp.read(4)
        if not s:
            msg = "missing frame size"
            raise EOFError(msg)

        framesize = i32(s)

        self.decodermaxblock = framesize
        self.tile = [ImageFile._Tile("fli", (0, 0) + self.size, self.__offset, None)]

        self.__offset += framesize

    def tell(self) -> int:
        return self.__frame


#
# registry

Image.register_open(FliImageFile.format, FliImageFile, _accept)

Image.register_extensions(FliImageFile.format, [".fli", ".flc"])
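Frames in an FLI/FLC file are deltas against the previous frame, which is why `seek` above replays intermediate frames instead of jumping. A sketch of walking an animation in order; `animation.flc` is a hypothetical input file:

```python
from PIL import Image, ImageSequence

with Image.open("animation.flc") as im:  # hypothetical input file
    print(im.n_frames, im.info["duration"])
    # ImageSequence.Iterator drives seek() frame by frame, in order.
    for i, frame in enumerate(ImageSequence.Iterator(im)):
        frame.convert("RGB").save(f"frame_{i:03d}.png")
```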
@@ -1,134 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# base class for raster font file parsers
#
# history:
# 1997-06-05 fl   created
# 1997-08-19 fl   restrict image width
#
# Copyright (c) 1997-1998 by Secret Labs AB
# Copyright (c) 1997-1998 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import os
from typing import BinaryIO

from . import Image, _binary

WIDTH = 800


def puti16(
    fp: BinaryIO, values: tuple[int, int, int, int, int, int, int, int, int, int]
) -> None:
    """Write network order (big-endian) 16-bit sequence"""
    for v in values:
        if v < 0:
            v += 65536
        fp.write(_binary.o16be(v))


class FontFile:
    """Base class for raster font file handlers."""

    bitmap: Image.Image | None = None

    def __init__(self) -> None:
        self.info: dict[bytes, bytes | int] = {}
        self.glyph: list[
            tuple[
                tuple[int, int],
                tuple[int, int, int, int],
                tuple[int, int, int, int],
                Image.Image,
            ]
            | None
        ] = [None] * 256

    def __getitem__(self, ix: int) -> (
        tuple[
            tuple[int, int],
            tuple[int, int, int, int],
            tuple[int, int, int, int],
            Image.Image,
        ]
        | None
    ):
        return self.glyph[ix]

    def compile(self) -> None:
        """Create metrics and bitmap"""

        if self.bitmap:
            return

        # create bitmap large enough to hold all data
        h = w = maxwidth = 0
        lines = 1
        for glyph in self.glyph:
            if glyph:
                d, dst, src, im = glyph
                h = max(h, src[3] - src[1])
                w = w + (src[2] - src[0])
                if w > WIDTH:
                    lines += 1
                    w = src[2] - src[0]
                maxwidth = max(maxwidth, w)

        xsize = maxwidth
        ysize = lines * h

        if xsize == 0 and ysize == 0:
            return

        self.ysize = h

        # paste glyphs into bitmap
        self.bitmap = Image.new("1", (xsize, ysize))
        self.metrics: list[
            tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]]
            | None
        ] = [None] * 256
        x = y = 0
        for i in range(256):
            glyph = self[i]
            if glyph:
                d, dst, src, im = glyph
                xx = src[2] - src[0]
                x0, y0 = x, y
                x = x + xx
                if x > WIDTH:
                    x, y = 0, y + h
                    x0, y0 = x, y
                    x = xx
                s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
                self.bitmap.paste(im.crop(src), s)
                self.metrics[i] = d, dst, s

    def save(self, filename: str) -> None:
        """Save font"""

        self.compile()

        # font data
        if not self.bitmap:
            msg = "No bitmap created"
            raise ValueError(msg)
        self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")

        # font metrics
        with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp:
            fp.write(b"PILfont\n")
            fp.write(f";;;;;;{self.ysize};\n".encode("ascii"))  # HACK!!!
            fp.write(b"DATA\n")
            for id in range(256):
                m = self.metrics[id]
                if not m:
                    puti16(fp, (0,) * 10)
                else:
                    puti16(fp, m[0] + m[1] + m[2])
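`FontFile` is only a base class; concrete parsers such as `PIL.BdfFontFile` feed it glyphs, and `save` then emits the `.pil`/`.pbm` pair that `ImageFont.load` consumes. A sketch using a hypothetical `courier.bdf`:

```python
from PIL import BdfFontFile, ImageFont

with open("courier.bdf", "rb") as fp:  # hypothetical X11 bitmap font
    font = BdfFontFile.BdfFontFile(fp)  # a FontFile subclass
font.save("courier")  # writes courier.pil and courier.pbm via save() above

pil_font = ImageFont.load("courier.pil")
```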
@@ -1,257 +0,0 @@
#
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library.
# $Id$
#
# FlashPix support for PIL
#
# History:
# 97-01-25 fl   Created (reads uncompressed RGB images only)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import annotations

import olefile

from . import Image, ImageFile
from ._binary import i32le as i32

# we map from colour field tuples to (mode, rawmode) descriptors
MODES = {
    # opacity
    (0x00007FFE,): ("A", "L"),
    # monochrome
    (0x00010000,): ("L", "L"),
    (0x00018000, 0x00017FFE): ("RGBA", "LA"),
    # photo YCC
    (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
    (0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"),
    # standard RGB (NIFRGB)
    (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"),
    (0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"),
}


#
# --------------------------------------------------------------------


def _accept(prefix: bytes) -> bool:
    return prefix[:8] == olefile.MAGIC


##
# Image plugin for the FlashPix images.


class FpxImageFile(ImageFile.ImageFile):
    format = "FPX"
    format_description = "FlashPix"

    def _open(self) -> None:
        #
        # read the OLE directory and see if this is likely
        # to be a FlashPix file

        try:
            self.ole = olefile.OleFileIO(self.fp)
        except OSError as e:
            msg = "not an FPX file; invalid OLE file"
            raise SyntaxError(msg) from e

        root = self.ole.root
        if not root or root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
            msg = "not an FPX file; bad root CLSID"
            raise SyntaxError(msg)

        self._open_index(1)

    def _open_index(self, index: int = 1) -> None:
        #
        # get the Image Contents Property Set

        prop = self.ole.getproperties(
            [f"Data Object Store {index:06d}", "\005Image Contents"]
        )

        # size (highest resolution)

        assert isinstance(prop[0x1000002], int)
        assert isinstance(prop[0x1000003], int)
        self._size = prop[0x1000002], prop[0x1000003]

        size = max(self.size)
        i = 1
        while size > 64:
            size = size // 2
            i += 1
        self.maxid = i - 1

        # mode.  instead of using a single field for this, flashpix
        # requires you to specify the mode for each channel in each
        # resolution subimage, and leaves it to the decoder to make
        # sure that they all match.  for now, we'll cheat and assume
        # that this is always the case.

        id = self.maxid << 16

        s = prop[0x2000002 | id]

        if not isinstance(s, bytes) or (bands := i32(s, 4)) > 4:
            msg = "Invalid number of bands"
            raise OSError(msg)

        # note: for now, we ignore the "uncalibrated" flag
        colors = tuple(i32(s, 8 + i * 4) & 0x7FFFFFFF for i in range(bands))

        self._mode, self.rawmode = MODES[colors]

        # load JPEG tables, if any
        self.jpeg = {}
        for i in range(256):
            id = 0x3000001 | (i << 16)
            if id in prop:
                self.jpeg[i] = prop[id]

        self._open_subimage(1, self.maxid)

    def _open_subimage(self, index: int = 1, subimage: int = 0) -> None:
        #
        # setup tile descriptors for a given subimage

        stream = [
            f"Data Object Store {index:06d}",
            f"Resolution {subimage:04d}",
            "Subimage 0000 Header",
        ]

        fp = self.ole.openstream(stream)

        # skip prefix
        fp.read(28)

        # header stream
        s = fp.read(36)

        size = i32(s, 4), i32(s, 8)
        # tilecount = i32(s, 12)
        tilesize = i32(s, 16), i32(s, 20)
        # channels = i32(s, 24)
        offset = i32(s, 28)
        length = i32(s, 32)

        if size != self.size:
            msg = "subimage mismatch"
            raise OSError(msg)

        # get tile descriptors
        fp.seek(28 + offset)
        s = fp.read(i32(s, 12) * length)

        x = y = 0
        xsize, ysize = size
        xtile, ytile = tilesize
        self.tile = []

        for i in range(0, len(s), length):
            x1 = min(xsize, x + xtile)
            y1 = min(ysize, y + ytile)

            compression = i32(s, i + 8)

            if compression == 0:
                self.tile.append(
                    ImageFile._Tile(
                        "raw",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (self.rawmode,),
                    )
                )

            elif compression == 1:
                # FIXME: the fill decoder is not implemented
                self.tile.append(
                    ImageFile._Tile(
                        "fill",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (self.rawmode, s[12:16]),
                    )
                )

            elif compression == 2:
                internal_color_conversion = s[14]
                jpeg_tables = s[15]
                rawmode = self.rawmode

                if internal_color_conversion:
                    # The image is stored as usual (usually YCbCr).
                    if rawmode == "RGBA":
                        # For "RGBA", data is stored as YCbCrA based on
                        # negative RGB. The following trick works around
                        # this problem:
                        jpegmode, rawmode = "YCbCrK", "CMYK"
                    else:
                        jpegmode = None  # let the decoder decide

                else:
                    # The image is stored as defined by rawmode
                    jpegmode = rawmode

                self.tile.append(
                    ImageFile._Tile(
                        "jpeg",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (rawmode, jpegmode),
                    )
                )

                # FIXME: jpeg tables are tile dependent; the prefix
                # data must be placed in the tile descriptor itself!

                if jpeg_tables:
                    self.tile_prefix = self.jpeg[jpeg_tables]

            else:
                msg = "unknown/invalid compression"
                raise OSError(msg)

            x = x + xtile
            if x >= xsize:
                x, y = 0, y + ytile
                if y >= ysize:
                    break  # isn't really required

        self.stream = stream
        self._fp = self.fp
        self.fp = None

    def load(self) -> Image.core.PixelAccess | None:
        if not self.fp:
            self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"])

        return ImageFile.ImageFile.load(self)

    def close(self) -> None:
        self.ole.close()
        super().close()

    def __exit__(self, *args: object) -> None:
        self.ole.close()
        super().__exit__()


#
# --------------------------------------------------------------------


Image.register_open(FpxImageFile.format, FpxImageFile, _accept)

Image.register_extension(FpxImageFile.format, ".fpx")
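Since the plugin registers itself, FlashPix files open like any other format, provided the `olefile` dependency imported at the top of the module is installed. A sketch using a hypothetical `sample.fpx`:

```python
from PIL import Image

with Image.open("sample.fpx") as im:  # hypothetical input file
    print(im.format, im.mode, im.size)
    im.load()  # streams "Subimage 0000 Data" out of the OLE store
```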
@@ -1,115 +0,0 @@
"""
A Pillow loader for .ftc and .ftu files (FTEX)
Jerome Leclanche <jerome@leclan.ch>

The contents of this file are hereby released in the public domain (CC0)
Full text of the CC0 license:
https://creativecommons.org/publicdomain/zero/1.0/

Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001

The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a
packed custom format called FTEX. This file format uses file extensions FTC
and FTU.
* FTC files are compressed textures (using standard texture compression).
* FTU files are not compressed.

Texture File Format

The FTC and FTU texture files both use the same format. This
has the following structure:
{header}
{format_directory}
{data}
Where:
{header} = {
    u32:magic,
    u32:version,
    u32:width,
    u32:height,
    u32:mipmap_count,
    u32:format_count
}

* The "magic" number is "FTEX".
* "width" and "height" are the dimensions of the texture.
* "mipmap_count" is the number of mipmaps in the texture.
* "format_count" is the number of texture formats (different versions of the
  same texture) in this file.

{format_directory} = format_count * { u32:format, u32:where }

The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB
uncompressed textures.
The texture data for a format starts at the position "where" in the file.

Each set of texture data in the file has the following structure:
{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } }

* "mipmap_size" is the number of bytes in that mip level. For compressed
  textures this is the size of the texture data compressed with DXT1. For 24 bit
  uncompressed textures, this is 3 * width * height. Following this are the image
  bytes for that mipmap level.

Note: All data is stored in little-Endian (Intel) byte order.
"""

from __future__ import annotations

import struct
from enum import IntEnum
from io import BytesIO

from . import Image, ImageFile

MAGIC = b"FTEX"


class Format(IntEnum):
    DXT1 = 0
    UNCOMPRESSED = 1


class FtexImageFile(ImageFile.ImageFile):
    format = "FTEX"
    format_description = "Texture File Format (IW2:EOC)"

    def _open(self) -> None:
        if not _accept(self.fp.read(4)):
            msg = "not an FTEX file"
            raise SyntaxError(msg)
        struct.unpack("<i", self.fp.read(4))  # version
        self._size = struct.unpack("<2i", self.fp.read(8))
        mipmap_count, format_count = struct.unpack("<2i", self.fp.read(8))

        self._mode = "RGB"

        # Only support single-format files.
        # I don't know of any multi-format file.
        assert format_count == 1

        format, where = struct.unpack("<2i", self.fp.read(8))
        self.fp.seek(where)
        (mipmap_size,) = struct.unpack("<i", self.fp.read(4))

        data = self.fp.read(mipmap_size)

        if format == Format.DXT1:
            self._mode = "RGBA"
            self.tile = [ImageFile._Tile("bcn", (0, 0) + self.size, 0, (1,))]
        elif format == Format.UNCOMPRESSED:
            self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 0, ("RGB", 0, 1))]
        else:
            msg = f"Invalid texture compression format: {repr(format)}"
            raise ValueError(msg)

        self.fp.close()
        self.fp = BytesIO(data)

    def load_seek(self, pos: int) -> None:
        pass


def _accept(prefix: bytes) -> bool:
    return prefix[:4] == MAGIC


Image.register_open(FtexImageFile.format, FtexImageFile, _accept)
Image.register_extensions(FtexImageFile.format, [".ftc", ".ftu"])
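The header layout from the docstring is simple enough to assemble by hand, which makes for a self-contained check of the uncompressed (format 1) path. A minimal sketch:

```python
import struct
from io import BytesIO

from PIL import Image

w, h = 2, 2
# magic, then version, width, height, mipmap_count, format_count.
header = b"FTEX" + struct.pack("<5i", 1, w, h, 1, 1)
# One directory entry: format=1 (uncompressed), where=first byte after it.
directory = struct.pack("<2i", 1, len(header) + 8)
# mipmap_size, then 3 bytes per pixel of RGB data.
data = struct.pack("<i", 3 * w * h) + b"\xff\x00\x00" * (w * h)

im = Image.open(BytesIO(header + directory + data))
print(im.mode, im.size)  # -> RGB (2, 2)
```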
@@ -1,103 +0,0 @@
#
# The Python Imaging Library
#
# load a GIMP brush file
#
# History:
#       96-03-14 fl     Created
#       16-01-08 es     Version 2
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
# Copyright (c) Eric Soroos 2016.
#
# See the README file for information on usage and redistribution.
#
#
# See https://github.com/GNOME/gimp/blob/mainline/devel-docs/gbr.txt for
# format documentation.
#
# This code interprets version 1 and 2 .gbr files.
# Version 1 files are obsolete, and should not be used for new
#   brushes.
# Version 2 files are saved by GIMP v2.8 (at least)
# Version 3 files have a format specifier of 18 for 16bit floats in
#   the color depth field. This is currently unsupported by Pillow.
from __future__ import annotations

from . import Image, ImageFile
from ._binary import i32be as i32


def _accept(prefix: bytes) -> bool:
    return len(prefix) >= 8 and i32(prefix, 0) >= 20 and i32(prefix, 4) in (1, 2)


##
# Image plugin for the GIMP brush format.


class GbrImageFile(ImageFile.ImageFile):
    format = "GBR"
    format_description = "GIMP brush file"

    def _open(self) -> None:
        header_size = i32(self.fp.read(4))
        if header_size < 20:
            msg = "not a GIMP brush"
            raise SyntaxError(msg)
        version = i32(self.fp.read(4))
        if version not in (1, 2):
            msg = f"Unsupported GIMP brush version: {version}"
            raise SyntaxError(msg)

        width = i32(self.fp.read(4))
        height = i32(self.fp.read(4))
        color_depth = i32(self.fp.read(4))
        if width <= 0 or height <= 0:
            msg = "not a GIMP brush"
            raise SyntaxError(msg)
        if color_depth not in (1, 4):
            msg = f"Unsupported GIMP brush color depth: {color_depth}"
            raise SyntaxError(msg)

        if version == 1:
            comment_length = header_size - 20
        else:
            comment_length = header_size - 28
            magic_number = self.fp.read(4)
            if magic_number != b"GIMP":
                msg = "not a GIMP brush, bad magic number"
                raise SyntaxError(msg)
            self.info["spacing"] = i32(self.fp.read(4))

        comment = self.fp.read(comment_length)[:-1]

        if color_depth == 1:
            self._mode = "L"
        else:
            self._mode = "RGBA"

        self._size = width, height

        self.info["comment"] = comment

        # Image might not be small
        Image._decompression_bomb_check(self.size)

        # Data is an uncompressed block of w * h * bytes/pixel
        self._data_size = width * height * color_depth

    def load(self) -> Image.core.PixelAccess | None:
        if self._im is None:
            self.im = Image.core.new(self.mode, self.size)
            self.frombytes(self.fp.read(self._data_size))
        return Image.Image.load(self)


#
# registry


Image.register_open(GbrImageFile.format, GbrImageFile, _accept)
Image.register_extension(GbrImageFile.format, ".gbr")
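A version-2 brush can likewise be synthesized in memory: the 20-byte core header plus the `GIMP` magic and spacing field, then the comment and raw pixels. A minimal sketch for the grayscale (`color_depth == 1`) case:

```python
import struct
from io import BytesIO

from PIL import Image

w, h = 2, 2
# header_size counts the 28 fixed bytes plus the 1-byte comment terminator.
header = struct.pack(">5I", 28 + 1, 2, w, h, 1) + b"GIMP" + struct.pack(">I", 10)
im = Image.open(BytesIO(header + b"\x00" + b"\x80" * (w * h)))
print(im.mode, im.size, im.info["spacing"])  # -> L (2, 2) 10
```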
@@ -1,102 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# GD file handling
#
# History:
# 1996-04-12 fl   Created
#
# Copyright (c) 1997 by Secret Labs AB.
# Copyright (c) 1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#


"""
.. note::
    This format cannot be automatically recognized, so the
    class is not registered for use with :py:func:`PIL.Image.open()`.  To open a
    gd file, use the :py:func:`PIL.GdImageFile.open()` function instead.

.. warning::
    THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE.  This
    implementation is provided for convenience and demonstrational
    purposes only.
"""
from __future__ import annotations

from typing import IO

from . import ImageFile, ImagePalette, UnidentifiedImageError
from ._binary import i16be as i16
from ._binary import i32be as i32
from ._typing import StrOrBytesPath


class GdImageFile(ImageFile.ImageFile):
    """
    Image plugin for the GD uncompressed format.  Note that this format
    is not supported by the standard :py:func:`PIL.Image.open()` function.  To use
    this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and
    use the :py:func:`PIL.GdImageFile.open()` function.
    """

    format = "GD"
    format_description = "GD uncompressed images"

    def _open(self) -> None:
        # Header
        assert self.fp is not None

        s = self.fp.read(1037)

        if i16(s) not in [65534, 65535]:
            msg = "Not a valid GD 2.x .gd file"
            raise SyntaxError(msg)

        self._mode = "L"  # FIXME: "P"
        self._size = i16(s, 2), i16(s, 4)

        true_color = s[6]
        true_color_offset = 2 if true_color else 0

        # transparency index
        tindex = i32(s, 7 + true_color_offset)
        if tindex < 256:
            self.info["transparency"] = tindex

        self.palette = ImagePalette.raw(
            "XBGR", s[7 + true_color_offset + 4 : 7 + true_color_offset + 4 + 256 * 4]
        )

        self.tile = [
            ImageFile._Tile(
                "raw",
                (0, 0) + self.size,
                7 + true_color_offset + 4 + 256 * 4,
                ("L", 0, 1),
            )
        ]


def open(fp: StrOrBytesPath | IO[bytes], mode: str = "r") -> GdImageFile:
    """
    Load texture from a GD image file.

    :param fp: GD file name, or an opened file handle.
    :param mode: Optional mode.  In this version, if the mode argument
        is given, it must be "r".
    :returns: An image instance.
    :raises OSError: If the image could not be read.
    """
    if mode != "r":
        msg = "bad mode"
        raise ValueError(msg)

    try:
        return GdImageFile(fp)
    except SyntaxError as e:
        msg = "cannot identify this image file"
        raise UnidentifiedImageError(msg) from e
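As the note above says, GD files never go through `Image.open`; the module-level `open` is the entry point. A sketch with a hypothetical `chart.gd`:

```python
from PIL import GdImageFile

im = GdImageFile.open("chart.gd")  # hypothetical input file
print(im.size, im.info.get("transparency"))
```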
File diff suppressed because it is too large
@@ -1,149 +0,0 @@
#
# Python Imaging Library
# $Id$
#
# stuff to read (and render) GIMP gradient files
#
# History:
#       97-08-23 fl     Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#

"""
Stuff to translate curve segments to palette values (derived from
the corresponding code in GIMP, written by Federico Mena Quintero.
See the GIMP distribution for more information.)
"""
from __future__ import annotations

from math import log, pi, sin, sqrt
from typing import IO, Callable

from ._binary import o8

EPSILON = 1e-10
""""""  # Enable auto-doc for data member


def linear(middle: float, pos: float) -> float:
    if pos <= middle:
        if middle < EPSILON:
            return 0.0
        else:
            return 0.5 * pos / middle
    else:
        pos = pos - middle
        middle = 1.0 - middle
        if middle < EPSILON:
            return 1.0
        else:
            return 0.5 + 0.5 * pos / middle


def curved(middle: float, pos: float) -> float:
    return pos ** (log(0.5) / log(max(middle, EPSILON)))


def sine(middle: float, pos: float) -> float:
    return (sin((-pi / 2.0) + pi * linear(middle, pos)) + 1.0) / 2.0


def sphere_increasing(middle: float, pos: float) -> float:
    return sqrt(1.0 - (linear(middle, pos) - 1.0) ** 2)


def sphere_decreasing(middle: float, pos: float) -> float:
    return 1.0 - sqrt(1.0 - linear(middle, pos) ** 2)


SEGMENTS = [linear, curved, sine, sphere_increasing, sphere_decreasing]
""""""  # Enable auto-doc for data member


class GradientFile:
    gradient: (
        list[
            tuple[
                float,
                float,
                float,
                list[float],
                list[float],
                Callable[[float, float], float],
            ]
        ]
        | None
    ) = None

    def getpalette(self, entries: int = 256) -> tuple[bytes, str]:
        assert self.gradient is not None
        palette = []

        ix = 0
        x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]

        for i in range(entries):
            x = i / (entries - 1)

            while x1 < x:
                ix += 1
                x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]

            w = x1 - x0

            if w < EPSILON:
                scale = segment(0.5, 0.5)
            else:
                scale = segment((xm - x0) / w, (x - x0) / w)

            # expand to RGBA
            r = o8(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5))
            g = o8(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 0.5))
            b = o8(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5))
            a = o8(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5))

            # add to palette
            palette.append(r + g + b + a)

        return b"".join(palette), "RGBA"


class GimpGradientFile(GradientFile):
    """File handler for GIMP's gradient format."""

    def __init__(self, fp: IO[bytes]) -> None:
        if fp.readline()[:13] != b"GIMP Gradient":
            msg = "not a GIMP gradient file"
            raise SyntaxError(msg)

        line = fp.readline()

        # GIMP 1.2 gradient files don't contain a name, but GIMP 1.3 files do
        if line.startswith(b"Name: "):
            line = fp.readline().strip()

        count = int(line)

        self.gradient = []

        for i in range(count):
            s = fp.readline().split()
            w = [float(x) for x in s[:11]]

            x0, x1 = w[0], w[2]
            xm = w[1]
            rgb0 = w[3:7]
            rgb1 = w[7:11]

            segment = SEGMENTS[int(s[11])]
            cspace = int(s[12])

            if cspace != 0:
                msg = "cannot handle HSV colour space"
                raise OSError(msg)

            self.gradient.append((x0, x1, xm, rgb0, rgb1, segment))
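GIMP gradients are line-oriented text, so `GimpGradientFile` can be fed directly from memory. A minimal sketch of a single black-to-white linear segment (fields per line: x0 xm x1, RGBA at each end, segment type, colour space):

```python
from io import BytesIO

from PIL import GimpGradientFile

ggr = b"GIMP Gradient\nName: demo\n1\n0.0 0.5 1.0 0 0 0 1 1 1 1 1 0 0\n"
palette, rawmode = GimpGradientFile.GimpGradientFile(BytesIO(ggr)).getpalette()
print(rawmode, len(palette))      # -> RGBA 1024
print(palette[:4], palette[-4:])  # black at one end, white at the other
```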
Some files were not shown because too many files have changed in this diff