Objective-C: division on 32-bit and 64-bit devices produces different results

As described in the title, when I run the following division I get two different results depending on the device architecture:
unsigned int a = 42033;
unsigned int b = 360;
unsigned int c = 466;
double result = a/(double)(b * c);
// on arm64 -> result = 0.25055436337625181
// on armv7 -> result = 0.24986030696800732
Why don't the results match?
According to Apple's 64-Bit Transition Guide for Cocoa Touch, these data types have the same size in the 32-bit and 64-bit runtimes.
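For identical integer operands, scalar double-precision division is IEEE 754 conformant on both armv7 (VFP) and arm64, so it should produce bit-identical quotients; a useful first step is to confirm the operands really are identical on both devices. Below is a minimal diagnostic sketch (plain C; the '%a' hex-float output and the raw bit dump are just one way to compare values exactly, not part of the original code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void) {
    unsigned int a = 42033;
    unsigned int b = 360;
    unsigned int c = 466;
    // b * c is evaluated in unsigned int arithmetic first (167760,
    // no overflow), then converted to double for the division.
    double result = a / (double)(b * c);
    // %a prints the exact hexadecimal floating-point representation;
    // the raw bits make any cross-device difference unambiguous.
    uint64_t bits;
    memcpy(&bits, &result, sizeof bits);
    printf("b*c = %u  result = %a  bits = 0x%016llx\n",
           b * c, result, (unsigned long long)bits);
    return 0;
}

If the printed bits differ for these constants, the difference is in code generation; if they match, the divergence in the real code must come from different operand values.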
EDIT
The complete code:
#import "UIImage+MyCategory.h"
#define CLIP_THRESHOLD 0.74 // if this much of the image is the clip color, leave it alone
typedef struct {
    unsigned int leftNonColorIndex;
    unsigned int rightNonColorIndex;
    unsigned int nonColorCount;
} scanLineResult;
// Scan one row of pixels, counting those whose masked value differs
// from color and tracking the leftmost/rightmost such columns.
static inline scanLineResult scanOneLine(unsigned int *scanline, unsigned int count, unsigned int color, unsigned int mask) {
    scanLineResult result = {UINT32_MAX, 0, 0};
    for (int i = 0; i < count; i++) {
        if ((*scanline++ & mask) != color) {
            result.nonColorCount++;
            result.leftNonColorIndex = MIN(result.leftNonColorIndex, i);
            result.rightNonColorIndex = MAX(result.rightNonColorIndex, i);
        }
    }
    return result;
}
typedef struct {
    unsigned int leftNonColorIndex;
    unsigned int topNonColorIndex;
    unsigned int rightNonColorIndex;
    unsigned int bottomNonColorIndex;
    unsigned int nonColorCount;
    double colorRatio;
} colorBoundaries;
// Scan the whole bitmap, accumulating the bounding box of all pixels
// whose masked value differs from color, plus the ratio of
// matching-color pixels to total pixels.
static colorBoundaries findTrimColorBoundaries(unsigned int *buffer,
                                               unsigned int width,
                                               unsigned int height,
                                               unsigned int bytesPerRow,
                                               unsigned int color,
                                               unsigned int mask)
{
    colorBoundaries result = {UINT32_MAX, UINT32_MAX, 0, 0, 0, 0.0};
    unsigned int *currentLine = buffer;
    for (int i = 0; i < height; i++) {
        scanLineResult lineResult = scanOneLine(currentLine, width, color, mask);
        if (lineResult.nonColorCount) {
            result.nonColorCount += lineResult.nonColorCount;
            result.topNonColorIndex = MIN(result.topNonColorIndex, i);
            result.bottomNonColorIndex = MAX(result.bottomNonColorIndex, i);
            result.leftNonColorIndex = MIN(result.leftNonColorIndex, lineResult.leftNonColorIndex);
            result.rightNonColorIndex = MAX(result.rightNonColorIndex, lineResult.rightNonColorIndex);
        }
        // Rows may be padded, so advance by bytesPerRow, not by width.
        currentLine = (unsigned int *)((char *)currentLine + bytesPerRow);
    }
    double delta = result.nonColorCount / (double)(width * height);
    result.colorRatio = 1.0 - delta;
    return result;
}
@implementation UIImage (MyCategory)

- (UIImage *)crop:(CGRect)rect {
    rect = CGRectMake(rect.origin.x * self.scale,
                      rect.origin.y * self.scale,
                      rect.size.width * self.scale,
                      rect.size.height * self.scale);
    CGImageRef imageRef = CGImageCreateWithImageInRect([self CGImage], rect);
    UIImage *result = [UIImage imageWithCGImage:imageRef
                                          scale:self.scale
                                    orientation:self.imageOrientation];
    CGImageRelease(imageRef);
    return result;
}
- (UIImage *)trimWhiteBorders {
#ifdef __BIG_ENDIAN__
    // undefined
#else
    const unsigned int whiteXRGB = 0x00ffffff;
    // Which bits to actually check
    const unsigned int maskXRGB = 0x00ffffff;
#endif
    CGImageRef image = [self CGImage];
    CGBitmapInfo bitmapInfo = CGImageGetBitmapInfo(image);
    // Only support default image formats
    if (bitmapInfo != (kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Host))
        return nil;
    CGDataProviderRef dataProvider = CGImageGetDataProvider(image);
    CFDataRef imageData = CGDataProviderCopyData(dataProvider);
    colorBoundaries result = findTrimColorBoundaries((unsigned int *)CFDataGetBytePtr(imageData),
                                                     (unsigned int)CGImageGetWidth(image),
                                                     (unsigned int)CGImageGetHeight(image),
                                                     (unsigned int)CGImageGetBytesPerRow(image),
                                                     whiteXRGB,
                                                     maskXRGB);
    CFRelease(imageData);
    if (result.nonColorCount == 0 || result.colorRatio > CLIP_THRESHOLD)
        return self;
    CGRect trimRect = CGRectMake(result.leftNonColorIndex,
                                 result.topNonColorIndex,
                                 result.rightNonColorIndex - result.leftNonColorIndex + 1,
                                 result.bottomNonColorIndex - result.topNonColorIndex + 1);
    return [self crop:trimRect];
}

@end
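Since 'findTrimColorBoundaries' only divides plain integers, one plausible explanation is that the inputs differ between devices (for example, a different 'bytesPerRow' or bitmap layout changing what 'scanOneLine' counts) rather than the double division itself behaving differently. A hedged diagnostic sketch, assuming a hypothetical helper 'logColorRatioOperands' added alongside 'trimWhiteBorders' purely for instrumentation:

#import <UIKit/UIKit.h>

// Hypothetical instrumentation (not part of the original category):
// dump the exact integer operands that feed the colorRatio division,
// so armv7 and arm64 runs can be compared value by value.
static void logColorRatioOperands(CGImageRef image, unsigned int nonColorCount) {
    unsigned int width  = (unsigned int)CGImageGetWidth(image);
    unsigned int height = (unsigned int)CGImageGetHeight(image);
    unsigned int bpr    = (unsigned int)CGImageGetBytesPerRow(image);
    double delta = nonColorCount / (double)(width * height);
    // %a shows the exact bits of the quotient: if width, height and
    // nonColorCount match across devices but delta does not, the
    // division is at fault; otherwise the inputs themselves differ.
    NSLog(@"w=%u h=%u bytesPerRow=%u nonColorCount=%u delta=%a",
          width, height, bpr, nonColorCount, delta);
}

Calling this right after 'findTrimColorBoundaries' in 'trimWhiteBorders' on both devices would show whether the quotient or its operands diverge.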
There's something suspicious about the armv7 result: please post the actual code you used, the compile command line, the compiler version, and so on. Also, did you use 'double' or did you use 'CGFloat'? –
I've edited the question; I used Apple LLVM 6.0 with Xcode 6.2 – maross
There's a similar question, but for Visual Studio (PC) builds: http://stackoverflow.com/questions/22710272/difference-in-floating-point-arithmetics-between-x86-and-x64. It may be that a similar explanation applies to this processor family as well. – Cristik