chore: update project branding to CITRON

Signed-off-by: Zephyron <zephyron@citron-emu.org>
Author: Zephyron
Date: 2025-05-06 16:11:33 +10:00
Parent: 5e16e20427
Commit: b2d9cf4a01
119 changed files with 483 additions and 483 deletions
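
The hunks below only touch call sites; the macro definitions themselves presumably live in src/common/common_funcs.h, which is not among the hunks shown on this page. Assuming they mirror the original YUZU_NON_COPYABLE / YUZU_NON_MOVEABLE helpers, the renamed macros expand along these lines (a sketch, not part of this diff):

// Sketch of the presumed definitions in src/common/common_funcs.h after the rename.
// Deletes the copy constructor and copy-assignment operator of the given class.
#define CITRON_NON_COPYABLE(cls) \
    cls(const cls&) = delete;    \
    cls& operator=(const cls&) = delete

// Deletes the move constructor and move-assignment operator of the given class.
#define CITRON_NON_MOVEABLE(cls) \
    cls(cls&&) = delete;         \
    cls& operator=(cls&&) = delete

Because both macros expand to plain deleted member declarations, the rename is purely textual and has no behavioural effect on the classes below.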

View File

@@ -27,8 +27,8 @@ private:
static constexpr inline ClassTokenType ClassToken() { return ::Kernel::ClassToken<CLASS>; } \
\
public: \
-YUZU_NON_COPYABLE(CLASS); \
-YUZU_NON_MOVEABLE(CLASS); \
+CITRON_NON_COPYABLE(CLASS); \
+CITRON_NON_MOVEABLE(CLASS); \
\
using BaseClass = BASE_CLASS; \
static constexpr TypeObj GetStaticTypeObj() { \
@@ -211,7 +211,7 @@ private:
template <typename T>
class KScopedAutoObject {
public:
-YUZU_NON_COPYABLE(KScopedAutoObject);
+CITRON_NON_COPYABLE(KScopedAutoObject);
constexpr KScopedAutoObject() = default;

View File

@@ -16,8 +16,8 @@ class KProcess;
class KAutoObjectWithListContainer {
public:
-YUZU_NON_COPYABLE(KAutoObjectWithListContainer);
-YUZU_NON_MOVEABLE(KAutoObjectWithListContainer);
+CITRON_NON_COPYABLE(KAutoObjectWithListContainer);
+CITRON_NON_MOVEABLE(KAutoObjectWithListContainer);
using ListType = boost::intrusive::rbtree<KAutoObjectWithList>;

View File

@@ -12,8 +12,8 @@ namespace Kernel {
template <typename T, bool ClearNode = false>
class KDynamicResourceManager {
-YUZU_NON_COPYABLE(KDynamicResourceManager);
-YUZU_NON_MOVEABLE(KDynamicResourceManager);
+CITRON_NON_COPYABLE(KDynamicResourceManager);
+CITRON_NON_MOVEABLE(KDynamicResourceManager);
public:
using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;

View File

@@ -13,8 +13,8 @@ namespace Kernel {
template <typename T, bool ClearNode = false>
class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
-YUZU_NON_COPYABLE(KDynamicSlabHeap);
-YUZU_NON_MOVEABLE(KDynamicSlabHeap);
+CITRON_NON_COPYABLE(KDynamicSlabHeap);
+CITRON_NON_MOVEABLE(KDynamicSlabHeap);
public:
constexpr KDynamicSlabHeap() = default;

View File

@@ -21,8 +21,8 @@ namespace Kernel {
class KernelCore;
class KHandleTable {
-YUZU_NON_COPYABLE(KHandleTable);
-YUZU_NON_MOVEABLE(KHandleTable);
+CITRON_NON_COPYABLE(KHandleTable);
+CITRON_NON_MOVEABLE(KHandleTable);
public:
static constexpr size_t MaxTableSize = 1024;

View File

@@ -17,8 +17,8 @@ class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode<KMemory
friend class KMemoryRegionTree;
public:
-YUZU_NON_COPYABLE(KMemoryRegion);
-YUZU_NON_MOVEABLE(KMemoryRegion);
+CITRON_NON_COPYABLE(KMemoryRegion);
+CITRON_NON_MOVEABLE(KMemoryRegion);
constexpr KMemoryRegion() = default;
constexpr KMemoryRegion(u64 address, u64 last_address)
@@ -123,8 +123,8 @@ private:
Common::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
public:
-YUZU_NON_COPYABLE(KMemoryRegionTree);
-YUZU_NON_MOVEABLE(KMemoryRegionTree);
+CITRON_NON_COPYABLE(KMemoryRegionTree);
+CITRON_NON_MOVEABLE(KMemoryRegionTree);
using value_type = TreeType::value_type;
using size_type = TreeType::size_type;
@@ -327,8 +327,8 @@ private:
class KMemoryRegionAllocator final {
public:
-YUZU_NON_COPYABLE(KMemoryRegionAllocator);
-YUZU_NON_MOVEABLE(KMemoryRegionAllocator);
+CITRON_NON_COPYABLE(KMemoryRegionAllocator);
+CITRON_NON_MOVEABLE(KMemoryRegionAllocator);
static constexpr size_t MaxMemoryRegions = 200;

View File

@@ -14,8 +14,8 @@ namespace Kernel {
namespace {
class KScopedLightLockPair {
-YUZU_NON_COPYABLE(KScopedLightLockPair);
-YUZU_NON_MOVEABLE(KScopedLightLockPair);
+CITRON_NON_COPYABLE(KScopedLightLockPair);
+CITRON_NON_MOVEABLE(KScopedLightLockPair);
private:
KLightLock* m_lower;

View File

@@ -49,8 +49,8 @@ class KResourceLimit;
class KSystemResource;
class KPageTableBase {
-YUZU_NON_COPYABLE(KPageTableBase);
-YUZU_NON_MOVEABLE(KPageTableBase);
+CITRON_NON_COPYABLE(KPageTableBase);
+CITRON_NON_MOVEABLE(KPageTableBase);
public:
using TraversalEntry = Common::PageTable::TraversalEntry;

View File

@@ -33,8 +33,8 @@ class KScopedSchedulerLockAndSleep;
class KScheduler final {
public:
-YUZU_NON_COPYABLE(KScheduler);
-YUZU_NON_MOVEABLE(KScheduler);
+CITRON_NON_COPYABLE(KScheduler);
+CITRON_NON_MOVEABLE(KScheduler);
using LockType = KAbstractSchedulerLock<KScheduler>;

View File

@@ -18,8 +18,8 @@ class KernelCore;
namespace impl {
class KSlabHeapImpl {
-YUZU_NON_COPYABLE(KSlabHeapImpl);
-YUZU_NON_MOVEABLE(KSlabHeapImpl);
+CITRON_NON_COPYABLE(KSlabHeapImpl);
+CITRON_NON_MOVEABLE(KSlabHeapImpl);
public:
struct Node {
@@ -72,8 +72,8 @@ private:
template <bool SupportDynamicExpansion>
class KSlabHeapBase : protected impl::KSlabHeapImpl {
-YUZU_NON_COPYABLE(KSlabHeapBase);
-YUZU_NON_MOVEABLE(KSlabHeapBase);
+CITRON_NON_COPYABLE(KSlabHeapBase);
+CITRON_NON_MOVEABLE(KSlabHeapBase);
private:
size_t m_obj_size{};

View File

@@ -14,8 +14,8 @@ class KSpinLock {
public:
explicit KSpinLock() = default;
-YUZU_NON_COPYABLE(KSpinLock);
-YUZU_NON_MOVEABLE(KSpinLock);
+CITRON_NON_COPYABLE(KSpinLock);
+CITRON_NON_MOVEABLE(KSpinLock);
void Lock();
void Unlock();

View File

@@ -26,8 +26,8 @@ public:
PhysicalCore(KernelCore& kernel, std::size_t core_index);
~PhysicalCore();
-YUZU_NON_COPYABLE(PhysicalCore);
-YUZU_NON_MOVEABLE(PhysicalCore);
+CITRON_NON_COPYABLE(PhysicalCore);
+CITRON_NON_MOVEABLE(PhysicalCore);
// Execute guest code running on the given thread.
void RunThread(KThread* thread);

View File

@@ -298,8 +298,8 @@ private:
namespace ResultImpl {
template <auto EvaluateResult, class F>
class ScopedResultGuard {
-YUZU_NON_COPYABLE(ScopedResultGuard);
-YUZU_NON_MOVEABLE(ScopedResultGuard);
+CITRON_NON_COPYABLE(ScopedResultGuard);
+CITRON_NON_MOVEABLE(ScopedResultGuard);
private:
Result& m_ref;

View File

@@ -11,7 +11,7 @@ namespace Service::JIT {
class CodeMemory {
public:
-YUZU_NON_COPYABLE(CodeMemory);
+CITRON_NON_COPYABLE(CodeMemory);
explicit CodeMemory() = default;

View File

@@ -13,8 +13,8 @@
#include "core/memory.h"
#include "video_core/host1x/host1x.h"
-using Core::Memory::YUZU_PAGESIZE;
-constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16;
+using Core::Memory::CITRON_PAGESIZE;
+constexpr size_t BIG_PAGE_SIZE = CITRON_PAGESIZE * 16;
namespace Service::Nvidia::NvCore {
NvMap::Handle::Handle(u64 size_, Id id_)
@@ -32,7 +32,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
flags = pFlags;
kind = pKind;
-align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
+align = pAlign < CITRON_PAGESIZE ? CITRON_PAGESIZE : pAlign;
session_id = pSessionId;
// This flag is only applicable for handles with an address passed
@@ -43,7 +43,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
"Mapping nvmap handles without a CPU side address is unimplemented!");
}
-size = Common::AlignUp(size, YUZU_PAGESIZE);
+size = Common::AlignUp(size, CITRON_PAGESIZE);
aligned_size = Common::AlignUp(size, align);
address = pAddress;
allocated = true;
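
Throughout the nvdrv hunks below, Core::Memory::CITRON_PAGESIZE is the emulated guest page size, renamed from Core::Memory::YUZU_PAGESIZE in the same branding pass; its definition in core/memory.h is not part of the hunks shown here. A hedged sketch of what it presumably looks like after the rename (the CITRON_PAGEBITS name is an assumption based on the original YUZU_PAGEBITS constant):

// core/memory.h (sketch) -- 4 KiB guest pages.
namespace Core::Memory {
constexpr std::size_t CITRON_PAGEBITS = 12;
constexpr u64 CITRON_PAGESIZE = 1ULL << CITRON_PAGEBITS; // 0x1000 bytes
} // namespace Core::Memory

With that value, the BIG_PAGE_SIZE constant introduced at the top of this file works out to CITRON_PAGESIZE * 16 = 0x10000 (64 KiB).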

View File

@@ -152,7 +152,7 @@ NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
return NvResult::BadValue;
}
-if (params.page_size != VM::YUZU_PAGESIZE && params.page_size != vm.big_page_size) {
+if (params.page_size != VM::CITRON_PAGESIZE && params.page_size != vm.big_page_size) {
return NvResult::BadValue;
}
@@ -162,10 +162,10 @@ NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
return NvResult::NotImplemented;
}
-const u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS
+const u32 page_size_bits{params.page_size == VM::CITRON_PAGESIZE ? VM::PAGE_SIZE_BITS
: vm.big_page_size_bits};
-auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator
+auto& allocator{params.page_size == VM::CITRON_PAGESIZE ? *vm.small_page_allocator
: *vm.big_page_allocator};
if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
@@ -189,7 +189,7 @@ NvResult nvhost_as_gpu::AllocateSpace(IoctlAllocSpace& params) {
.mappings{},
.page_size = params.page_size,
.sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
-.big_pages = params.page_size != VM::YUZU_PAGESIZE,
+.big_pages = params.page_size != VM::CITRON_PAGESIZE,
};
return NvResult::Success;
@@ -201,7 +201,7 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
if (!mapping->fixed) {
auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
-u32 page_size{mapping->big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
+u32 page_size{mapping->big_page ? vm.big_page_size : VM::CITRON_PAGESIZE};
u64 aligned_size{Common::AlignUp(mapping->size, page_size)};
allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),
@@ -248,9 +248,9 @@ NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) {
gmmu->Unmap(params.offset, allocation.size);
}
-auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator
+auto& allocator{params.page_size == VM::CITRON_PAGESIZE ? *vm.small_page_allocator
: *vm.big_page_allocator};
-u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS
+u32 page_size_bits{params.page_size == VM::CITRON_PAGESIZE ? VM::PAGE_SIZE_BITS
: vm.big_page_size_bits};
allocator.Free(static_cast<u32>(params.offset >> page_size_bits),
@@ -360,7 +360,7 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
bool big_page{[&]() {
if (Common::IsAligned(handle->align, vm.big_page_size)) {
return true;
-} else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) {
+} else if (Common::IsAligned(handle->align, VM::CITRON_PAGESIZE)) {
return false;
} else {
ASSERT(false);
@@ -387,7 +387,7 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
mapping_map[params.offset] = mapping;
} else {
auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
-u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
+u32 page_size{big_page ? vm.big_page_size : VM::CITRON_PAGESIZE};
u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
params.offset = static_cast<u64>(allocator.Allocate(
@@ -461,7 +461,7 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
params.regions = std::array<VaRegion, 2>{
VaRegion{
.offset = vm.small_page_allocator->GetVAStart() << VM::PAGE_SIZE_BITS,
-.page_size = VM::YUZU_PAGESIZE,
+.page_size = VM::CITRON_PAGESIZE,
._pad0_{},
.pages = vm.small_page_allocator->GetVALimit() - vm.small_page_allocator->GetVAStart(),
},

View File

@@ -190,8 +190,8 @@ private:
std::mutex mutex; //!< Locks all AS operations
struct VM {
-static constexpr u32 YUZU_PAGESIZE{0x1000};
-static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(YUZU_PAGESIZE)};
+static constexpr u32 CITRON_PAGESIZE{0x1000};
+static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(CITRON_PAGESIZE)};
static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000};
static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000};
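
In the VM struct above, PAGE_SIZE_BITS is derived from the renamed constant with std::countr_zero rather than hard-coded, so the two values cannot drift apart. A minimal, self-contained check of that arithmetic (a sketch reusing the same values; not code from this commit):

#include <bit>
#include <cstdint>

int main() {
    constexpr std::uint32_t CITRON_PAGESIZE{0x1000};                           // 4 KiB small pages
    constexpr std::uint32_t PAGE_SIZE_BITS{std::countr_zero(CITRON_PAGESIZE)}; // trailing zero bits
    constexpr std::uint32_t DEFAULT_BIG_PAGE_SIZE{0x20000};                    // 128 KiB big pages
    static_assert(PAGE_SIZE_BITS == 12);                       // 0x1000 == 1 << 12
    static_assert((1u << PAGE_SIZE_BITS) == CITRON_PAGESIZE);  // round-trips exactly
    static_assert(DEFAULT_BIG_PAGE_SIZE % CITRON_PAGESIZE == 0); // big pages are multiples of small pages
    return 0;
}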

View File

@@ -17,7 +17,7 @@
#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/memory.h"
-using Core::Memory::YUZU_PAGESIZE;
+using Core::Memory::CITRON_PAGESIZE;
namespace Service::Nvidia::Devices {
@@ -82,7 +82,7 @@ NvResult nvmap::IocCreate(IocCreateParams& params) {
std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
auto result =
-file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description);
+file.CreateHandle(Common::AlignUp(params.size, CITRON_PAGESIZE), handle_description);
if (result != NvResult::Success) {
LOG_CRITICAL(Service_NVDRV, "Failed to create Object");
return result;
@@ -108,8 +108,8 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
}
// Force page size alignment at a minimum
-if (params.align < YUZU_PAGESIZE) {
-params.align = YUZU_PAGESIZE;
+if (params.align < CITRON_PAGESIZE) {
+params.align = CITRON_PAGESIZE;
}
auto handle_description{file.GetHandle(params.handle)};

View File

@@ -228,10 +228,10 @@ struct ProcessContext {
R_UNLESS(bss_size == expected_bss_size, RO::ResultInvalidNro);
// Validate all sizes are aligned.
-R_UNLESS(Common::IsAligned(text_size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidNro);
-R_UNLESS(Common::IsAligned(ro_size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidNro);
-R_UNLESS(Common::IsAligned(rw_size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidNro);
-R_UNLESS(Common::IsAligned(bss_size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidNro);
+R_UNLESS(Common::IsAligned(text_size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidNro);
+R_UNLESS(Common::IsAligned(ro_size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidNro);
+R_UNLESS(Common::IsAligned(rw_size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidNro);
+R_UNLESS(Common::IsAligned(bss_size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidNro);
// Validate sections are in order.
R_UNLESS(text_ofs <= ro_ofs, RO::ResultInvalidNro);
@@ -286,16 +286,16 @@ private:
};
Result ValidateAddressAndNonZeroSize(u64 address, u64 size) {
-R_UNLESS(Common::IsAligned(address, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidAddress);
+R_UNLESS(Common::IsAligned(address, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidAddress);
R_UNLESS(size != 0, RO::ResultInvalidSize);
-R_UNLESS(Common::IsAligned(size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidSize);
+R_UNLESS(Common::IsAligned(size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidSize);
R_UNLESS(address < address + size, RO::ResultInvalidSize);
R_SUCCEED();
}
Result ValidateAddressAndSize(u64 address, u64 size) {
-R_UNLESS(Common::IsAligned(address, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidAddress);
-R_UNLESS(Common::IsAligned(size, Core::Memory::YUZU_PAGESIZE), RO::ResultInvalidSize);
+R_UNLESS(Common::IsAligned(address, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidAddress);
+R_UNLESS(Common::IsAligned(size, Core::Memory::CITRON_PAGESIZE), RO::ResultInvalidSize);
R_UNLESS(size == 0 || address < address + size, RO::ResultInvalidSize);
R_SUCCEED();
}
@@ -369,7 +369,7 @@ public:
ASSERT(context != nullptr);
// Validate address.
-R_UNLESS(Common::IsAligned(nrr_address, Core::Memory::YUZU_PAGESIZE),
+R_UNLESS(Common::IsAligned(nrr_address, Core::Memory::CITRON_PAGESIZE),
RO::ResultInvalidAddress);
// Check the NRR is loaded.
@@ -436,7 +436,7 @@ public:
ASSERT(context != nullptr);
// Validate address.
-R_UNLESS(Common::IsAligned(nro_address, Core::Memory::YUZU_PAGESIZE),
+R_UNLESS(Common::IsAligned(nro_address, Core::Memory::CITRON_PAGESIZE),
RO::ResultInvalidAddress);
// Check the NRO is loaded.

View File

@@ -932,7 +932,7 @@ Result ISystemSettingsServer::SetPrimaryAlbumStorage(PrimaryAlbumStorage primary
Result ISystemSettingsServer::GetBatteryLot(Out<BatteryLot> out_battery_lot) {
LOG_INFO(Service_SET, "called");
-*out_battery_lot = {"YUZU0EMULATOR14022024"};
+*out_battery_lot = {"CITRON0EMULATOR14022024"};
R_SUCCEED();
}

View File

@@ -23,7 +23,7 @@ template <typename T>
struct CFReleaser {
T ptr;
-YUZU_NON_COPYABLE(CFReleaser);
+CITRON_NON_COPYABLE(CFReleaser);
constexpr CFReleaser() : ptr(nullptr) {}
constexpr CFReleaser(T ptr) : ptr(ptr) {}
constexpr operator T() {

View File

@@ -23,7 +23,7 @@ namespace {
Result AllocateSharedBufferMemory(std::unique_ptr<Kernel::KPageGroup>* out_page_group,
Core::System& system, u32 size) {
-using Core::Memory::YUZU_PAGESIZE;
+using Core::Memory::CITRON_PAGESIZE;
// Allocate memory for the system shared buffer.
auto& kernel = system.Kernel();
@@ -34,7 +34,7 @@ Result AllocateSharedBufferMemory(std::unique_ptr<Kernel::KPageGroup>* out_page_
// Allocate memory from secure pool.
R_TRY(kernel.MemoryManager().AllocateAndOpen(
-pg.get(), size / YUZU_PAGESIZE,
+pg.get(), size / CITRON_PAGESIZE,
Kernel::KMemoryManager::EncodeOption(Kernel::KMemoryManager::Pool::Secure,
Kernel::KMemoryManager::Direction::FromBack)));
@@ -58,13 +58,13 @@ Result AllocateSharedBufferMemory(std::unique_ptr<Kernel::KPageGroup>* out_page_
Result MapSharedBufferIntoProcessAddressSpace(Common::ProcessAddress* out_map_address,
std::unique_ptr<Kernel::KPageGroup>& pg,
Kernel::KProcess* process, Core::System& system) {
-using Core::Memory::YUZU_PAGESIZE;
+using Core::Memory::CITRON_PAGESIZE;
auto& page_table = process->GetPageTable();
// Get bounds of where mapping is possible.
const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart());
-const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE;
+const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / CITRON_PAGESIZE;
const auto state = Kernel::KMemoryState::IoMemory;
const auto perm = Kernel::KMemoryPermission::UserReadWrite;
std::mt19937_64 rng{process->GetRandomEntropy(0)};
@@ -73,7 +73,7 @@ Result MapSharedBufferIntoProcessAddressSpace(Common::ProcessAddress* out_map_ad
Result res = ResultSuccess;
int i;
for (i = 0; i < 64; i++) {
-*out_map_address = alias_code_begin + ((rng() % alias_code_size) * YUZU_PAGESIZE);
+*out_map_address = alias_code_begin + ((rng() % alias_code_size) * CITRON_PAGESIZE);
res = page_table.MapPageGroup(*out_map_address, *pg, state, perm);
if (R_SUCCEEDED(res)) {
break;